hexsha
stringlengths 40
40
| size
int64 6
14.9M
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 6
260
| max_stars_repo_name
stringlengths 6
119
| max_stars_repo_head_hexsha
stringlengths 40
41
| max_stars_repo_licenses
sequence | max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 6
260
| max_issues_repo_name
stringlengths 6
119
| max_issues_repo_head_hexsha
stringlengths 40
41
| max_issues_repo_licenses
sequence | max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 6
260
| max_forks_repo_name
stringlengths 6
119
| max_forks_repo_head_hexsha
stringlengths 40
41
| max_forks_repo_licenses
sequence | max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | avg_line_length
float64 2
1.04M
| max_line_length
int64 2
11.2M
| alphanum_fraction
float64 0
1
| cells
sequence | cell_types
sequence | cell_type_groups
sequence |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7995d76e9437d7fff6a8923633feb6dd5b4153f | 14,252 | ipynb | Jupyter Notebook | notebooks/reports/201804_GabrielBogo_AccidentsDataCleaning.ipynb | joinvalle/Joinville-Smart-Mobility | 0b430b4aa7d2839cddf0462a36b3b5f6646e213f | [
"MIT"
] | 6 | 2018-01-19T17:23:46.000Z | 2018-07-26T01:22:52.000Z | notebooks/reports/201804_GabrielBogo_AccidentsDataCleaning.ipynb | pmj-sepud/Joinville-Smart-Mobility | 0b430b4aa7d2839cddf0462a36b3b5f6646e213f | [
"MIT"
] | 4 | 2018-03-28T11:57:46.000Z | 2018-04-02T18:11:16.000Z | notebooks/reports/201804_GabrielBogo_AccidentsDataCleaning.ipynb | pmj-sepud/Joinville-Smart-Mobility | 0b430b4aa7d2839cddf0462a36b3b5f6646e213f | [
"MIT"
] | 2 | 2020-12-23T12:05:43.000Z | 2021-06-14T00:12:54.000Z | 38.518919 | 103 | 0.404294 | [
[
[
"%matplotlib inline\nimport os\nimport sys\nproject_dir = os.path.join(os.pardir, os.pardir)\nsys.path.append(project_dir)\n\nimport dotenv\ndotenv_path = os.path.join(project_dir, '.env')\ndotenv.load_dotenv(dotenv_path)\n\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point\nfrom geojson import Feature\nimport json\n\nfrom src.data.processing_func import (connect_database, extract_geo_sections)\n\npd.options.display.max_columns = 50\npd.options.display.max_rows = 500",
"_____no_output_____"
],
[
"def read_accidents(fp):\n df = (pd.read_csv(fp, encoding=\"latin3\")\n .drop([\"hora\", \"classe_de\", \"codlog\"], axis=1)\n .rename(columns={\"nïż½mero_de\": \"numero_de\"})\n .replace(to_replace=\"\\\\b.*culo\\\\b\",value=\"Obstaculo\",regex=True)\n .replace(to_replace=\"\\\\b.*nibus\\\\b\",value=\"Onibus\",regex=True)\n .replace(to_replace=\"\\\\b[Ss]a.*da\",value=\"Saida\",regex=True)\n .replace(to_replace=\"\\\\b[Cc]aminh.*o\\\\b\",value=\"Caminhao\",regex=True)\n .replace(to_replace=\"\\\\bCarro.*a\\\\b\",value=\"Carroca\",regex=True)\n .pipe(correct_latlon)\n .pipe(extract_parts)\n .pipe(generalize_entity)\n .pipe(add_geometry)\n .pipe(convert_into_gdf)\n .pipe(add_geojson)\n .pipe(name_index)\n )\n return df\n\ndef correct_latlon(df):\n df.loc[df.X > 10**7,\"X\"] /= 1000\n df.loc[df.Y > 10**8,\"Y\"] /= 1000\n return df\n\ndef extract_parts(df):\n df = (pd.concat([pd.DataFrame(data=np.tile(row.values,(len(row['tipo'].split(' x ')),1)),\n columns=row.index,\n index=(row['tipo'].split(' x '))) \n for _, row in df.iterrows()])\n .reset_index()\n .rename(columns={\"index\": \"entidade\"})\n )\n return df\n\ndef generalize_entity(df):\n df.loc[df['entidade'].str.contains('[Mm]oto'), 'entidade'] = 'Moto'\n df.loc[df['entidade'].str.contains('[Bb]icicleta'), 'entidade'] = 'Bicicleta'\n return df\n \ndef add_geometry(df):\n df[\"geometry\"] = df.apply(lambda row: Point((row[\"X\"], row[\"Y\"])), axis=1)\n return df\n\ndef convert_into_gdf(df): \n crs = \"+proj=utm +zone=22J, +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n gdf = gpd.GeoDataFrame(df, crs=crs, geometry=\"geometry\")\n gdf = gdf.to_crs({'init': 'epsg:4326'})\n return gdf\n\ndef add_geojson(df):\n df[\"Longitude\"] = df.apply(lambda row: row.geometry.coords[0][0], axis=1)\n df[\"Latitude\"] = df.apply(lambda row: row.geometry.coords[0][1], axis=1)\n df[\"geojson\"] = df.apply(lambda row: json.dumps(Feature(geometry=row.geometry)), axis=1)\n return df\n\ndef name_index(df):\n df.index.name = 
\"id\"\n return df\n\ndf_accidents = read_accidents(project_dir + \"/data/external/bombeiros_acidentes2015.csv\")\ndf_accidents.head()",
"_____no_output_____"
],
[
"df_accidents.to_csv(project_dir + \"/data/processed/processed_accidents.csv\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7996faeca9d021ad469bc81e18f78b020731707 | 44,002 | ipynb | Jupyter Notebook | feature_selection.ipynb | xzhou016/feature_selection | 770991c033be77a18183249c42c9a93ce42eba68 | [
"MIT"
] | null | null | null | feature_selection.ipynb | xzhou016/feature_selection | 770991c033be77a18183249c42c9a93ce42eba68 | [
"MIT"
] | null | null | null | feature_selection.ipynb | xzhou016/feature_selection | 770991c033be77a18183249c42c9a93ce42eba68 | [
"MIT"
] | null | null | null | 44.223116 | 163 | 0.593905 | [
[
[
"import math\n\n#done\ndef convert_to_float(not_float):\n float_list = []\n for i in not_float:\n float_list.append([float(j) for j in i])\n return float_list",
"_____no_output_____"
],
[
"def process_file(raw_file_name):\n raw_file = open(raw_file_name, 'r')\n str_data = []\n with open(raw_file_name) as file:\n raw_data = file.readlines()\n str_data = [i.split() for i in raw_data]\n return convert_to_float(str_data)",
"_____no_output_____"
],
[
"#done\ndef take_out_classification(data):\n classfication = []\n for i in data:\n classfication.append(i.pop(0))\n return classfication",
"_____no_output_____"
],
[
"#done\ndef add_back_classification(data, list_of_classfication):\n for i in range(0, len(data)):\n data[i].insert(0, float(list_of_classfication[i]))",
"_____no_output_____"
],
[
"#done\ndef getmean(data, features, instances):\n mean_list = []\n column = 0\n #classif = take_out_classification(data)\n for i in range(features):\n mean = sum(row[column] for row in data)/instances\n column = column + 1\n mean_list.append(mean)\n #add_back_classification(data, classif)\n return mean_list",
"_____no_output_____"
],
[
"#done\ndef getstd(data, mean_list, features, instances):\n standard_d_list = []\n #classif = take_out_classification(data)\n mean_list = getmean(data, features, instances)\n for i in range(features):\n standard_d = math.sqrt((sum(pow((row[i] - mean_list[i]), 2) for row in data)) / instances)\n standard_d_list.append(standard_d)\n #add_back_classification(data, classif)\n #df = pd.DataFrame(standard_d_list)\n #print(df)\n return standard_d_list",
"_____no_output_____"
],
[
"#done\ndef scale(data):\n classif = take_out_classification(data)\n features = len(data[0])\n instances = len(data)\n #print(instances)\n mean_list = getmean(data, features , instances)\n standard_d_list = getstd(data, mean_list, features, instances )\n #df = pd.DataFrame(data)\n #print(df)\n \n for i in range(0, instances):\n for j in range(features):\n #print(\"in loop\")\n #print(data[i][j])\n data[i][j] = (data[i][j] - mean_list[j])/ standard_d_list[j]\n add_back_classification(data, classif)\n return data",
"_____no_output_____"
],
[
"#done\ndef euclideanDistance(instance_a, instance_b, features):\n #remove classfication for calculation\n #print(instance_a)\n classification_1 = instance_a.pop(0)\n classification_2 = instance_b.pop(0)\n \n distance = 0\n #print(repr(instance_a) +\"\\n\"+ repr(instance_b))\n for i in range(len(features)):\n #print(instance_b)\n #print(features[i])\n #print(instance_b[features[i]])\n # print(\"instance a = \" + repr(instance_a[i]) + \" and instance b = \" + repr(instance_b[i]))\n distance = distance + pow((instance_a[features[i]] - instance_b[features[i]]), 2)\n # print(\"and the distance is: \" + repr(distance) + \"\\n\")\n \n #add classification back\n instance_a.insert(0, classification_1)\n instance_b.insert(0, classification_2)\n return math.sqrt(distance)",
"_____no_output_____"
],
[
"def nearest_neighbor(data, test, features):\n best_neighbor = [-1, -1]\n distance = float('inf')\n test_distance = float('inf')\n for i in data:\n if len(features) > 0:\n test_distance = euclideanDistance(i, test, features)\n if test_distance < distance:\n distance = test_distance\n #print(i[0])\n best_neighbor[0] = i[0]\n #print(test_distance)\n best_neighbor[1]= test_distance\n return best_neighbor",
"_____no_output_____"
],
[
"import numpy as np\ndef compute_accuracy(data,feature_set, feature):\n\n #print(feature)\n list_feature = list(feature_set)\n accuracy = 0\n test = []\n neighbor = []\n if feature < 0:\n feature_set.discard(abs(feature))\n list_feature = list(feature_set)\n feature_set.add(abs(feature))\n elif feature > 0:\n feature_set.add(abs(feature))\n list_feature = list(feature_set)\n feature_set.discard(abs(feature))\n #random(data)\n #count = 1\n for i in range(220):\n #count = count + 1\n np.random.shuffle(data)\n #print(data)\n test = data.pop(0)\n #print(\"test \" + repr(test[0]))\n neighbor = nearest_neighbor(data, test, list_feature)\n #print(\"Neighbor \" + repr(neighbor))\n #print(\"found \" + repr(nearest_neighbor(data, test, feature)[0]))\n if neighbor[0] == test[0]:\n accuracy = accuracy + 1\n data.append(test)\n #print(count)\n accuracy = accuracy/221\n return accuracy * 100",
"_____no_output_____"
],
[
"def forward_selection(data):\n print(\"Froward selection\")\n features = len(data[0]) - 1\n feature_set = set()\n best_accuracy = 0\n #print(best_accuracy)\n for i in range(features):\n print(\"At level: \" + repr(i+1))\n add_this = -1\n for j in range(i, features):\n if j not in feature_set:\n accuracy = compute_accuracy(data, feature_set, j)\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n add_this = j\n if add_this > 0:\n feature_set.add(add_this)\n print(\"Best accuracy so far: \" + \"%.2f\" % best_accuracy + \" %\" + \" With feature set: \" + repr(feature_set))\n else:\n print(\"(Warning, Accuracy has decreased! Continuing search in case of local maxima)\")\n print(\"This is what I got: \")\n print(\"With feature set: \" + repr(feature_set)+ \" Best accuracy: \" + \"%.2f\" % best_accuracy + \" %\")\n \n return feature_set",
"_____no_output_____"
],
[
"def backward_elimination(data):\n print(\"Backward elimination\")\n features = len(data[0]) - 1\n feature_set = set(i for i in range(features))\n #print(feature_set)\n best_accuracy = 0\n #print(best_accuracy)\n for i in range(features):\n print(\"At level: \" + repr(i+1))\n remove_this = -1\n for j in range(features):\n if j in feature_set:\n accuracy = compute_accuracy(data, feature_set, -1*j)\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n remove_this = j\n if remove_this > 0:\n print(\"Removing feature: \" + repr(remove_this))\n feature_set.discard(remove_this)\n print(\"Best accuracy so far: \" + \"%.2f\" % best_accuracy + \" %\" + \" With feature set: \" + repr(feature_set))\n else:\n print(\"Warning, Accuracy has decreased! Breaking out!\")\n break\n print(\"This is what I got: \")\n print(\"With feature set: \" + repr(feature_set)+ \" Best accuracy: \" + \"%.2f\" % best_accuracy + \" %\")\n return feature_set",
"_____no_output_____"
],
[
"def custom_selection(data):\n feature_set = forward_selection(data)\n feature_set2 = set()\n feature_set2 = feature_set2.union(feature_set)\n \n features = len(data[0]) - 1\n #print(feature_set)\n best_accuracy = 0\n #print(best_accuracy)\n for i in range(features):\n print(\"At level: \" + repr(i+1))\n remove_this = -1\n for j in range(features):\n if j in feature_set:\n accuracy = compute_accuracy(data, feature_set, -1*j)\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n remove_this = j\n if remove_this > 0:\n print(\"Removing feature: \" + repr(remove_this))\n feature_set.discard(remove_this)\n print(\"Best accuracy so far: \" + \"%.2f\" % best_accuracy + \" %\" + \" With feature set: \" + repr(feature_set))\n else:\n print(\"Warning, Accuracy has decreased! Breaking out!\")\n break\n print(\"This is what I got: \")\n print(\"With feature set: \" + repr(feature_set)+ \" Best accuracy: \" + \"%.2f\" % best_accuracy + \" %\")\n print(\"Trivial feature/s: \" + repr(feature_set2.difference(feature_set)))",
"_____no_output_____"
],
[
"def entry_func():\n print(\"Welcome to Xiao's feature search\")\n file_name = input(\"Please enter file name: \" )\n file_name = str(file_name)\n while True:\n search = input(\"Please select search algorithm:\\n1 for Forward selection\\n2 for Backward elimination\\n3 for Custom algorithm\\n4 to exit\\n\")\n search = int(search)\n if search == 4:\n print(\"Goodbye!\")\n break\n print(\"Normalizing data...\")\n raw_file_open = file_name\n data = process_file(raw_file_open)\n scaled_data = scale(data)\n print(\"Done\")\n if search == 1:\n features = forward_selection(scaled_data)\n if search == 2:\n features = backward_elimination(scaled_data)\n if search == 3:\n custom_selection(scaled_data)\n ",
"_____no_output_____"
],
[
"entry_func()",
"Welcome to Xiao's feature search\nPlease enter file name: CS170BIGtestdata__27.txt\nPlease select search algorithm:\n1 for Forward selection\n2 for Backward elimination\n3 for Custom algorithm\n4 to exit\n1\nNormalizing data...\nDone\nFroward selection\nAt level: 1\nBest accuracy so far: 88.24 % With feature set: {31}\nAt level: 2\nBest accuracy so far: 95.93 % With feature set: {3, 31}\nAt level: 3\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 4\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 5\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 6\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 7\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 8\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 9\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 10\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 11\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 12\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 13\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 14\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 15\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 16\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 17\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 18\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 19\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 20\n(Warning, Accuracy has decreased! 
Continuing search in case of local maxima)\nAt level: 21\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 22\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 23\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 24\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 25\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 26\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 27\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 28\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 29\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 30\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 31\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 32\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 33\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 34\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 35\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 36\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 37\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 38\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 39\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 40\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 41\n(Warning, Accuracy has decreased! 
Continuing search in case of local maxima)\nAt level: 42\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 43\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 44\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 45\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 46\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 47\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 48\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 49\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 50\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nThis is what I got: \nWith feature set: {3, 31} Best accuracy: 95.93 %\nPlease select search algorithm:\n1 for Forward selection\n2 for Backward elimination\n3 for Custom algorithm\n4 to exit\n1\nNormalizing data...\nDone\nFroward selection\nAt level: 1\nBest accuracy so far: 86.43 % With feature set: {31}\nAt level: 2\nBest accuracy so far: 92.31 % With feature set: {3, 31}\nAt level: 3\nBest accuracy so far: 94.57 % With feature set: {34, 3, 31}\nAt level: 4\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 5\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 6\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 7\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 8\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 9\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 10\n(Warning, Accuracy has decreased! 
Continuing search in case of local maxima)\nAt level: 11\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 12\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 13\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 14\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 15\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 16\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 17\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 18\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 19\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 20\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 21\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 22\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 23\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 24\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 25\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 26\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 27\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 28\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 29\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 30\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 31\n(Warning, Accuracy has decreased! 
Continuing search in case of local maxima)\nAt level: 32\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 33\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 34\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 35\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 36\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 37\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 38\n(Warning, Accuracy has decreased! Continuing search in case of local maxima)\nAt level: 39\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79979c39377f9ba5eb0c12502be8b9adcddbfcd | 77,460 | ipynb | Jupyter Notebook | examples/ipynb_demos/pytc/.ipynb_checkpoints/Basic_API-checkpoint.ipynb | espottesmith/pyGSM | 5bf263f9ef6cbee3ec16355c5eb1839446e704e7 | [
"MIT"
] | null | null | null | examples/ipynb_demos/pytc/.ipynb_checkpoints/Basic_API-checkpoint.ipynb | espottesmith/pyGSM | 5bf263f9ef6cbee3ec16355c5eb1839446e704e7 | [
"MIT"
] | null | null | null | examples/ipynb_demos/pytc/.ipynb_checkpoints/Basic_API-checkpoint.ipynb | espottesmith/pyGSM | 5bf263f9ef6cbee3ec16355c5eb1839446e704e7 | [
"MIT"
] | null | null | null | 58.41629 | 6,834 | 0.548554 | [
[
[
"# pyGSM (Python + GSM) \n\npyGSM uses the powerful tools of python to allow for rapid prototyping and improved readability.\n\n* Reduction in number of lines ~12,000 vs ~30,000 and individual file size \n* Highly object oriented \n* Easy to read/use/prototype. No compiling!\n* No loss in performance since it uses high-performing numerical libraries \n* Interactive coding ",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.insert(0,'/home/caldaz/module/pyGSM')\nfrom molecule import Molecule\nfrom pes import PES\nfrom avg_pes import Avg_PES\nimport numpy as np\nfrom nifty import pvec1d,pmat2d\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom pytc import *\nimport manage_xyz\nfrom rhf_lot import *\nfrom psiw import *\nfrom nifty import getAllCoords,getAtomicSymbols,click,printcool\nimport pybel as pb\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## 1. Building the pyTC objects",
"_____no_output_____"
]
],
[
[
"printcool(\"Build resources\")\nresources = ls.ResourceList.build()\nprintcool('{}'.format(resources))",
"#========================================================#\n#| \u001b[92m Build resources \u001b[0m |#\n#========================================================#\n#==============================================================#\n#| \u001b[92m ResourceList: \u001b[0m |#\n#| \u001b[92m CPU threads: 1 \u001b[0m |#\n#| \u001b[92m GPU contexts: 1 \u001b[0m |#\n#| \u001b[92m GPU context details: \u001b[0m |#\n#| \u001b[92m N ID Name CC CPU Buffer GPU Buffer \u001b[0m |#\n#| \u001b[92m 0 0 GeForce GTX TITAN 3.5 1024 1024 \u001b[0m |#\n#| \u001b[92m \u001b[0m |#\n#==============================================================#\n"
],
[
"printcool(\"build the Lightspeed (pyTC) objecs\")\n\nfilepath='data/ethylene.xyz'\nmolecule = ls.Molecule.from_xyz_file(filepath)\ngeom = geometry.Geometry.build(\n resources=resources,\n molecule=molecule,\n basisname='6-31gs',\n )\nprintcool('{}'.format(geom))\n\nref = RHF.from_options(\n geometry= geom, \n g_convergence=1.0E-6,\n fomo=True,\n fomo_method='gaussian',\n fomo_temp=0.3,\n fomo_nocc=7,\n fomo_nact=2,\n print_level=1,\n )\nref.compute_energy()\ncasci = CASCI.from_options(\n reference=ref,\n nocc=7,\n nact=2,\n nalpha=1,\n nbeta=1,\n S_inds=[0],\n S_nstates=[2],\n print_level=1,\n )\ncasci.compute_energy()\npsiw = CASCI_LOT.from_options(\n casci=casci,\n rhf_guess=True,\n rhf_mom=True,\n orbital_coincidence='core',\n state_coincidence='full',\n )",
"#========================================================#\n#| \u001b[92m build the Lightspeed (pyTC) objecs \u001b[0m |#\n#========================================================#\n#========================================================#\n#| \u001b[92m Geometry: \u001b[0m |#\n#| \u001b[92m QMMM = False \u001b[0m |#\n#| \u001b[92m -D3 = False \u001b[0m |#\n#| \u001b[92m ECP = False \u001b[0m |#\n#| \u001b[92m \u001b[0m |#\n#| \u001b[92m Molecule: ethylene \u001b[0m |#\n#| \u001b[92m Natom = 6 \u001b[0m |#\n#| \u001b[92m Charge = 0.000 \u001b[0m |#\n#| \u001b[92m Multiplicity = 1.000 \u001b[0m |#\n#| \u001b[92m \u001b[0m |#\n#| \u001b[92m Basis: 6-31gs \u001b[0m |#\n#| \u001b[92m nao = 38 \u001b[0m |#\n#| \u001b[92m ncart = 38 \u001b[0m |#\n#| \u001b[92m nprim = 46 \u001b[0m |#\n#| \u001b[92m nshell = 20 \u001b[0m |#\n#| \u001b[92m natom = 6 \u001b[0m |#\n#| \u001b[92m pure? = No \u001b[0m |#\n#| \u001b[92m max L = 2 \u001b[0m |#\n#| \u001b[92m \u001b[0m |#\n#| \u001b[92m Basis: cc-pvdz-minao \u001b[0m |#\n#| \u001b[92m nao = 14 \u001b[0m |#\n#| \u001b[92m ncart = 14 \u001b[0m |#\n#| \u001b[92m nprim = 60 \u001b[0m |#\n#| \u001b[92m nshell = 10 \u001b[0m |#\n#| \u001b[92m natom = 6 \u001b[0m |#\n#| \u001b[92m pure? 
= Yes \u001b[0m |#\n#| \u001b[92m max L = 1 \u001b[0m |#\n#| \u001b[92m \u001b[0m |#\n#========================================================#\n==> RHF <==\n\nExternal Environment:\n Enuc = 3.3333038401617195E+01\n Eext = 3.3333038401617195E+01\n\nSCF Iterations:\n\nIter: Energy dE dG Time[s]\n 0: -7.7092708661464698E+01 -7.709E+01 1.143E+00 0.841\n 1: -7.8147981534974207E+01 -1.055E+00 6.345E-02 0.050\n 2: -7.8169965996087384E+01 -2.198E-02 1.559E-02 0.050\n 3: -7.8172283892637140E+01 -2.318E-03 7.279E-03 0.049\n 4: -7.8172545611657412E+01 -2.617E-04 4.307E-04 0.048\n 5: -7.8172547344654049E+01 -1.733E-06 3.867E-05 0.046\n 6: -7.8172547366554227E+01 -2.190E-08 7.338E-06 0.041\n 7: -7.8172547366986933E+01 -4.327E-10 1.649E-06 0.037\n 8: -7.8172547366784329E+01 2.026E-10 1.337E-07 0.047\n\nSCF Converged\n\nSCF Energy = -7.8172547366784329E+01\n\nSCF Internal Energy (E) = -7.7792973600257000E+01\nSCF Entropy Term (-T * S) = -3.7957376652733060E-01\nSCF Free Energy (E - T * S) = -7.8172547366784329E+01\n\n==> End RHF <==\n\n==> CASCI <==\n\nExternal Environment:\n Enuc = 3.3333038401617195E+01\n Eext = 3.3333038401617195E+01\n\nCore Energy = -76.85572592289878\n\n=> S=0 States <=\n\nCASCI S=0 Energies:\n\n I: Total E\n 0: -7.8050956144574869E+01\n 1: -7.7668325171082003E+01\n\n=> End S=0 States <=\n\n==> End CASCI <==\n\n"
]
],
[
[
"## Section 2: Building the pyGSM Objects",
"_____no_output_____"
]
],
[
[
"printcool(\"Build the pyGSM Level of Theory object (LOT)\")\nlot=PyTC.from_options(states=[(1,0),(1,1)],psiw=psiw,do_coupling=True,fnm=filepath)",
"#========================================================#\n#| \u001b[92m Build the pyGSM Level of Theory object (LOT) \u001b[0m |#\n#========================================================#\n initializing LOT from file\n"
],
[
"printcool(\"Build the pyGSM Potential Energy Surface Object (PES)\")\npes1 = PES.from_options(lot=lot,ad_idx=0,multiplicity=1)\npes2 = PES.from_options(lot=lot,ad_idx=1,multiplicity=1)\npes = Avg_PES(pes1,pes2,lot=lot)",
"#===========================================================#\n#| \u001b[92m Build the pyGSM Potential Energy Surface Object (PES) \u001b[0m |#\n#===========================================================#\n"
],
[
"printcool(\"Build the pyGSM Molecule object \\n with Translation and Rotation Internal Coordinates (TRIC)\")\nM = Molecule.from_options(fnm=filepath,PES=pes,coordinate_type=\"TRIC\")",
"#================================================================#\n#| \u001b[92m Build the pyGSM Molecule object \u001b[0m |#\n#| \u001b[92m with Translation and Rotation Internal Coordinates (TRIC) \u001b[0m |#\n#================================================================#\n reading cartesian coordinates from file\n making primitives from options!\n[NodeView((0, 1, 2, 3, 4, 5))]\n making primitive Hessian\n forming Hessian in basis\n"
]
],
[
[
"## Section 3: API of Molecule Class ",
"_____no_output_____"
]
],
[
[
"print(M)",
" molecule object\nOption:\n Key: fnm\n Value: data/ethylene.xyz\n Required: False\n Allowed Types: [<type 'str'>]\n Allowed Values: None\n Doc: File name to create the Molecule object from. Only used if geom is none.\n\nOption:\n Key: ftype\n Value: xyz\n Required: False\n Allowed Types: [<type 'str'>]\n Allowed Values: None\n Doc: filetype (optional) will attempt to read filetype if not given\n\nOption:\n Key: coordinate_type\n Value: TRIC\n Required: False\n Allowed Types: None\n Allowed Values: ['Cartesian', 'DLC', 'HDLC', 'TRIC']\n Doc: The type of coordinate system to build\n\nOption:\n Key: coord_obj\n Value: Internal coordinate system (atoms numbered from 1):\nDistance 1-2\nDistance 1-3\nDistance 1-4\nDistance 4-5\nDistance 4-6\nAngle 2-1-4\nAngle 3-1-4\nAngle 1-4-6\nAngle 5-4-6\nOut-of-Plane 1-2-3-4\nOut-of-Plane 4-1-5-6\nDihedral 2-1-4-5\nDihedral 2-1-4-6\nDihedral 3-1-4-5\nDihedral 3-1-4-6\nTranslation-X 1-6\nTranslation-Y 1-6\nTranslation-Z 1-6\nRotation-A 1-6\nRotation-B 1-6\nRotation-C 1-6\n<class 'slots.Distance'> : 5\n<class 'slots.Angle'> : 4\n<class 'slots.OutOfPlane'> : 2\n<class 'slots.Dihedral'> : 4\n<class 'slots.TranslationX'> : 1\n<class 'slots.TranslationY'> : 1\n<class 'slots.TranslationZ'> : 1\n<class 'slots.RotationA'> : 1\n<class 'slots.RotationB'> : 1\n<class 'slots.RotationC'> : 1\n Required: False\n Allowed Types: [<class 'dlc_new.DelocalizedInternalCoordinates'>, <class 'cartesian.CartesianCoordinates'>]\n Allowed Values: None\n Doc: A coordinate object.\n\nOption:\n Key: geom\n Value: None\n Required: False\n Allowed Types: [<type 'list'>]\n Allowed Values: None\n Doc: geometry including atomic symbols\n\nOption:\n Key: xyz\n Value: [[ 0. 0.6655 -0. ]\n [-0.9236 1.2396 0. ]\n [ 0.9236 1.2396 0. ]\n [ 0. -0.6655 -0. ]\n [ 0.9236 -1.2396 0. ]\n [-0.9236 -1.2396 0. 
]]\n Required: False\n Allowed Types: [<type 'numpy.ndarray'>]\n Allowed Values: None\n Doc: The Cartesian coordinates in Angstrom\n\nOption:\n Key: PES\n Value: <avg_pes.Avg_PES object at 0x7f42f3f67210>\n Required: True\n Allowed Types: [<class 'pes.PES'>, <class 'avg_pes.Avg_PES'>, <class 'penalty_pes.Penalty_PES'>]\n Allowed Values: None\n Doc: potential energy surface object to evaulate energies, gradients, etc. Pes is defined by charge, state, multiplicity,etc. \n\nOption:\n Key: Primitive_Hessian\n Value: [[ 0.35 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0.35 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0.35 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0.35 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0.35 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0.16 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0.16 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0.16 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0.16 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.045 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.045\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.023 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0.023 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0.023 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.023 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0.05 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.05 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.05 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0.05 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0.05 0. ]\n [ 0. 0. 0. 0. 
0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.05 ]]\n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: Primitive hessian save file for doing optimization.\n\nOption:\n Key: Hessian\n Value: [[ 0.05 0. 0. -0. 0. -0. 0. 0. -0. 0.\n 0. 0. -0. 0. -0. -0. -0. -0. ]\n [ 0. 0.05 0. 0. -0. -0. 0. 0. -0. 0.\n 0. 0. -0. 0. -0. -0. 0. -0. ]\n [ 0. 0. 0.05 0. 0. -0. 0. 0. -0. 0.\n 0. 0. -0. 0. -0. -0. 0. -0. ]\n [-0. 0. 0. 0.05 -0. 0. 0. -0. 0. -0.\n 0. 0. -0. -0. 0. 0. 0. -0. ]\n [ 0. -0. 0. -0. 0.05 -0. -0. 0. 0. -0.\n 0. -0. 0. 0. 0. 0. -0. 0. ]\n [-0. -0. -0. 0. -0. 0.2612 0. 0.0586 0.0378\n -0.0102 -0.0137 -0.0247 0.0387 -0. 0.0408 -0.0079 -0. 0. ]\n [ 0. 0. 0. 0. -0. 0. 0.05 -0. 0. -0.\n -0. -0. 0. 0. 0. -0. 0. -0. ]\n [ 0. 0. 0. -0. 0. 0.0586 -0. 0.2269 0.0258\n 0.0235 -0.0181 0.0479 0.0197 -0. 0.0138 -0.0196 0. -0. ]\n [-0. -0. -0. 0. 0. 0.0378 0. 0.0258 0.2917\n 0.0003 0.0086 -0.0131 -0.0719 0. -0.0129 0.0033 -0. -0. ]\n [ 0. 0. 0. -0. -0. -0.0102 -0. 0.0235 0.0003\n 0.3023 0.0082 -0.01 0.0119 0. -0.0469 -0.0602 -0. 0. ]\n [ 0. 0. 0. 0. 0. -0.0137 -0. -0.0181 0.0086\n 0.0082 0.3234 0.0009 -0.0119 -0. 0.0509 -0.031 0. -0. ]\n [ 0. 0. 0. 0. -0. -0.0247 -0. 0.0479 -0.0131\n -0.01 0.0009 0.3292 -0.0171 -0. 0.0051 -0.0045 -0. 0. ]\n [-0. -0. -0. -0. 0. 0.0387 0. 0.0197 -0.0719\n 0.0119 -0.0119 -0.0171 0.2411 -0. 0.0311 -0.0156 -0. -0. ]\n [ 0. 0. 0. -0. 0. -0. 0. -0. 0. 0.\n -0. -0. -0. 0.023 -0. -0. -0. 0. ]\n [-0. -0. -0. 0. 0. 0.0408 0. 0.0138 -0.0129\n -0.0469 0.0509 0.0051 0.0311 -0. 0.2201 0.001 0. -0. ]\n [-0. -0. -0. 0. 0. -0.0079 -0. -0.0196 0.0033\n -0.0602 -0.031 -0.0045 -0.0156 -0. 0.001 0.1941 -0. -0. ]\n [-0. 0. 0. 0. -0. -0. 0. 0. -0. -0.\n 0. -0. -0. -0. 0. -0. 0.0282 0. ]\n [-0. -0. -0. -0. 0. 0. -0. -0. -0. 0.\n -0. 0. -0. 0. -0. -0. 0. 
0.0282]]\n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: Hessian save file in the basis of coordinate_type.\n\nOption:\n Key: comment\n Value: \n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: A string that is saved on the molecule, used for descriptive purposes\n\n\n"
],
[
"print(\"printing gradient\")\npvec1d(M.gradient,5,'f')",
"printing gradient\n 0.00000 -0.00000 0.00000 -0.00000 -0.00000 -0.08843 0.00000 -0.05459 0.01673 0.00107 -0.04507 0.00125 -0.07016 0.00000 -0.06895 0.02654 -0.00000 0.00000 \n"
],
[
"M.energy",
"_____no_output_____"
],
[
"print(\"primitive internal coordinates\")\nprint(M.primitive_internal_coordinates)",
"primitive internal coordinates\n[Distance 1-2, Distance 1-3, Distance 1-4, Distance 4-5, Distance 4-6, Angle 2-1-4, Angle 3-1-4, Angle 1-4-6, Angle 5-4-6, Out-of-Plane 1-2-3-4, Out-of-Plane 4-1-5-6, Dihedral 2-1-4-5, Dihedral 2-1-4-6, Dihedral 3-1-4-5, Dihedral 3-1-4-6, Translation-X 1-6, Translation-Y 1-6, Translation-Z 1-6, Rotation-A 1-6, Rotation-B 1-6, Rotation-C 1-6]\n"
],
[
"printcool(\"primitive number of internal coordinates\")\nprint(M.num_primitives)",
"#========================================================#\n#| \u001b[92m primitive number of internal coordinates \u001b[0m |#\n#========================================================#\n21\n"
],
[
"printcool(\"getting the value of a primitive 0\")\nprint(M.primitive_internal_coordinates[0].value(M.xyz))",
"#========================================================#\n#| \u001b[92m getting the value of a primitive 0 \u001b[0m |#\n#========================================================#\n1.08749845303\n"
],
[
"printcool(\"printing coordinates in basis\")\npmat2d(M.coordinates.T,format='f')",
"#========================================================#\n#| \u001b[92m printing coordinates in basis \u001b[0m |#\n#========================================================#\n-0.0 0.0 0.0 0.0 0.0 0.9 -0.0 3.8 -0.2 0.2 -0.9 -1.2 -2.4 -0.0 0.7 0.6 3.9 0.0 \n"
],
[
"printcool(\"printing coordinate basis vectors\")\npmat2d(M.coord_basis,format='f')",
"#========================================================#\n#| \u001b[92m printing coordinate basis vectors \u001b[0m |#\n#========================================================#\n-0.0 -0.0 -0.0 0.0 -0.0 0.1 0.0 0.1 0.6 -0.5 0.1 0.3 -0.4 -0.0 0.1 0.2 -0.0 -0.0 \n-0.0 -0.0 -0.0 -0.0 -0.0 0.1 0.0 0.1 0.6 0.5 0.1 -0.5 -0.2 0.0 -0.2 -0.2 -0.0 -0.0 \n-0.0 -0.0 -0.0 0.0 -0.0 0.7 0.0 0.4 0.1 0.0 0.2 -0.0 0.4 -0.0 0.4 -0.2 0.0 -0.0 \n-0.0 -0.0 -0.0 0.0 -0.0 0.2 0.0 -0.0 0.0 -0.3 -0.7 -0.5 0.1 -0.0 -0.1 0.3 -0.0 0.0 \n 0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.4 0.0 0.4 -0.5 0.6 -0.0 0.0 -0.3 -0.1 -0.0 0.0 \n 0.0 0.0 0.0 -0.0 -0.0 0.1 -0.0 0.4 -0.4 -0.4 0.1 -0.2 -0.4 0.0 -0.4 -0.4 0.0 0.0 \n-0.0 -0.0 -0.0 0.0 -0.0 0.4 0.0 -0.0 -0.4 0.3 0.0 0.0 -0.6 -0.0 0.2 0.5 0.0 0.0 \n 0.0 0.0 0.0 -0.0 0.0 -0.4 -0.0 0.6 0.0 0.0 0.2 -0.2 0.2 -0.0 -0.1 0.6 -0.0 -0.0 \n 0.0 0.0 0.0 -0.0 0.0 -0.4 -0.0 0.3 -0.0 0.0 -0.3 -0.2 -0.3 -0.0 0.7 -0.3 0.0 -0.0 \n-0.0 0.0 0.0 0.0 0.0 0.0 -0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 0.3 0.3 \n-0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 0.3 -0.3 \n-0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 -0.5 -0.0 -0.0 -0.6 -0.0 \n 0.0 -0.0 -0.0 0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 -0.0 -0.0 -0.5 -0.0 -0.0 0.0 -0.6 \n 0.0 -0.0 -0.0 0.0 -0.0 -0.0 0.0 0.0 0.0 0.0 0.0 -0.0 0.0 -0.5 -0.0 -0.0 -0.0 0.6 \n 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 0.0 -0.5 -0.0 -0.0 0.6 0.0 \n-0.1 -1.0 -0.1 -0.0 0.0 -0.0 -0.0 0.0 0.0 0.0 0.0 -0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 \n 0.3 -0.1 0.9 0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 -0.0 -0.0 \n-0.9 0.0 0.3 0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 \n-0.0 0.0 -0.0 0.0 1.0 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 \n 0.0 -0.0 0.0 0.0 -0.0 0.0 -1.0 -0.0 -0.0 0.0 -0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 0.0 \n 0.0 0.0 0.0 -1.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 -0.0 -0.0 0.0 \n"
],
[
"printcool(\"Wilson B-Matrix (dq_i/dx_j)\")\nBmatp = M.coord_obj.Prims.wilsonB(M.xyz)\nplt.imshow(Bmatp, cmap=plt.cm.get_cmap('RdBu'))\nplt.show()",
"#========================================================#\n#| \u001b[92m Wilson B-Matrix (dq_i/dx_j) \u001b[0m |#\n#========================================================#\n"
],
[
"M.coord_obj.Prims.orderedmakePrimitives(M.xyz,M.coord_obj.options)",
"0 1\n0 2\n0 3\n3 4\n3 5\n"
],
[
"printcool(\"Wilson B-Matrix in coordinate basis\")\nBmat = M.coord_obj.wilsonB(M.xyz)\nplt.imshow(Bmat, cmap=plt.cm.get_cmap('RdBu'))\nplt.show()",
"#========================================================#\n#| \u001b[92m Wilson B-Matrix in coordinate basis \u001b[0m |#\n#========================================================#\n"
],
[
"printcool(\"G-Matrix (BB^T in basis of prims)\")\nG = M.coord_obj.Prims.GMatrix(M.xyz)\nplt.imshow(G, cmap=plt.cm.get_cmap('RdBu'))\nplt.show()",
"#========================================================#\n#| \u001b[92m G-Matrix (BB^T in basis of prims) \u001b[0m |#\n#========================================================#\n"
],
[
"printcool(\"G-Matrix in coordinate basis\")\nG = M.coord_obj.GMatrix(M.xyz)\nplt.imshow(G, cmap=plt.cm.get_cmap('RdBu'))\nplt.show()",
"#========================================================#\n#| \u001b[92m G-Matrix in coordinate basis \u001b[0m |#\n#========================================================#\n"
],
[
"dq = np.zeros((M.num_coordinates,1))\ndq[0]=0.1\nprintcool(\"taking step and printing new xyz,geom and coordinates\")\nM.update_xyz(dq)\nprint\nprint(\"new coordinates (in basis)\")\npmat2d(M.coordinates.T,format='f')\nprint\nprint(\"new xyz\")\npmat2d(M.xyz,4,format='f')\nprint\nprint(\"new geometry\")\nfor atom in M.geometry:\n print(\"%-2s %5.4f %5.4f %5.4f\") % (atom[0],atom[1],atom[2],atom[3])",
"#===========================================================#\n#| \u001b[92m taking step and printing new xyz,geom and coordinates \u001b[0m |#\n#===========================================================#\n Iter: 1 Err-dQ = 9.86456e-15 RMSD: 5.77350e-02 Damp: 1.00000e+00\n Cartesian coordinates obtained after 1 microiterations (rmsd = 5.774e-02 |dQ| = 9.865e-15)\n\nnew coordinates (in basis)\n 0.1 0.0 0.0 0.0 0.0 0.9 -0.0 3.8 -0.2 0.2 -0.9 -1.2 -2.4 -0.0 0.7 0.6 3.9 0.0 \n\nnew xyz\n-0.0069 0.6993 -0.0939 \n-0.9305 1.2734 -0.0939 \n 0.9167 1.2734 -0.0939 \n-0.0069 -0.6317 -0.0939 \n 0.9167 -1.2058 -0.0939 \n-0.9305 -1.2058 -0.0939 \n\nnew geometry\nC -0.0069 0.6993 -0.0939\nH -0.9305 1.2734 -0.0939\nH 0.9167 1.2734 -0.0939\nC -0.0069 -0.6317 -0.0939\nH 0.9167 -1.2058 -0.0939\nH -0.9305 -1.2058 -0.0939\n"
],
[
"# update coordinate basis\nprintcool(\"update coordinate basis\")\npmat2d(M.update_coordinate_basis(),format='f') #also used with constraints",
"#========================================================#\n#| \u001b[92m update coordinate basis \u001b[0m |#\n#========================================================#\n-0.0 0.0 -0.0 -0.0 0.0 0.1 0.0 0.1 0.6 -0.5 0.1 -0.3 -0.4 -0.0 0.1 0.2 -0.0 -0.0 \n 0.0 -0.0 -0.0 -0.0 0.0 0.1 0.0 0.1 0.6 0.5 0.1 0.5 -0.2 0.0 -0.2 -0.2 -0.0 -0.0 \n-0.0 0.0 0.0 0.0 0.0 0.7 0.0 0.4 0.1 0.0 0.2 0.0 0.4 -0.0 0.4 -0.2 0.0 -0.0 \n-0.0 -0.0 0.0 -0.0 0.0 0.2 -0.0 -0.0 0.0 -0.3 -0.7 0.5 0.1 -0.0 -0.1 0.3 -0.0 0.0 \n-0.0 0.0 -0.0 -0.0 0.0 0.0 0.0 0.4 0.0 0.4 -0.5 -0.6 -0.0 0.0 -0.3 -0.1 -0.0 0.0 \n-0.0 0.0 0.0 0.0 0.0 0.1 0.0 0.4 -0.4 -0.4 0.1 0.2 -0.4 0.0 -0.4 -0.4 0.0 0.0 \n-0.0 -0.0 0.0 0.0 0.0 0.4 -0.0 -0.0 -0.4 0.3 0.0 -0.0 -0.6 -0.0 0.2 0.5 0.0 0.0 \n-0.0 0.0 -0.0 0.0 -0.0 -0.4 0.0 0.6 0.0 0.0 0.2 0.2 0.2 -0.0 -0.1 0.6 -0.0 -0.0 \n-0.0 0.0 -0.0 -0.0 -0.0 -0.4 0.0 0.3 -0.0 0.0 -0.3 0.2 -0.3 -0.0 0.7 -0.3 0.0 -0.0 \n 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 0.3 0.3 \n 0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 0.0 0.0 0.0 -0.0 0.0 0.3 -0.3 \n 0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.5 -0.0 -0.0 -0.6 -0.0 \n-0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 0.0 -0.0 -0.5 -0.0 -0.0 0.0 -0.6 \n-0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.5 -0.0 -0.0 -0.0 0.6 \n-0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.5 -0.0 -0.0 0.6 0.0 \n-0.9 0.4 0.2 -0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 \n-0.1 0.3 -1.0 -0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 0.0 \n 0.5 0.9 0.2 0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 -0.0 -0.0 \n 0.0 0.0 0.0 -0.0 -1.0 0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 -0.0 \n-0.0 0.0 0.0 -0.0 0.0 0.0 1.0 -0.0 -0.0 -0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 \n-0.0 0.0 -0.0 1.0 -0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 \n"
],
[
"print(\"coords in new basis\")\npmat2d(M.coordinates.T,format='f')\n",
"coords in new basis\n-0.0 -0.1 -0.1 -0.0 -0.0 0.9 0.0 3.8 -0.2 0.2 -0.9 1.2 -2.4 -0.0 0.7 0.6 3.9 0.0 \n"
],
[
"print(\"update Hessian in basis\")\npmat2d(M.form_Hessian_in_basis(),format='f')\n",
"update Hessian in basis\n 0.1 0.0 -0.0 -0.0 -0.0 -0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 \n 0.0 0.1 0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 \n-0.0 0.0 0.1 -0.0 -0.0 0.0 0.0 -0.0 -0.0 -0.0 -0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.0 \n-0.0 -0.0 -0.0 0.1 -0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 -0.0 \n-0.0 0.0 -0.0 -0.0 0.1 0.0 0.0 0.0 0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 0.0 \n-0.0 -0.0 0.0 0.0 0.0 0.3 0.0 0.1 0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 -0.0 0.0 \n-0.0 0.0 0.0 -0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 -0.0 \n-0.0 0.0 -0.0 0.0 0.0 0.1 0.0 0.2 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 -0.0 0.0 -0.0 \n 0.0 -0.0 -0.0 -0.0 0.0 0.0 0.0 0.0 0.3 0.0 0.0 0.0 -0.1 0.0 -0.0 0.0 -0.0 -0.0 \n 0.0 -0.0 -0.0 0.0 0.0 -0.0 0.0 0.0 0.0 0.3 0.0 0.0 0.0 0.0 -0.0 -0.1 -0.0 0.0 \n-0.0 0.0 -0.0 0.0 -0.0 -0.0 -0.0 -0.0 0.0 0.0 0.3 -0.0 -0.0 -0.0 0.1 -0.0 0.0 -0.0 \n 0.0 -0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 0.3 0.0 0.0 -0.0 0.0 0.0 -0.0 \n-0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.1 0.0 -0.0 0.0 0.2 -0.0 0.0 -0.0 -0.0 -0.0 \n 0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 -0.0 0.0 -0.0 0.0 -0.0 -0.0 -0.0 0.0 \n-0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.0 -0.0 0.1 -0.0 0.0 -0.0 0.2 0.0 0.0 -0.0 \n 0.0 -0.0 0.0 -0.0 -0.0 -0.0 -0.0 -0.0 0.0 -0.1 -0.0 0.0 -0.0 -0.0 0.0 0.2 -0.0 -0.0 \n 0.0 -0.0 0.0 0.0 0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 0.0 -0.0 0.0 0.0 \n-0.0 0.0 -0.0 -0.0 0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 -0.0 -0.0 0.0 -0.0 -0.0 0.0 0.0 \n"
],
[
"# copying molecule automatically copies all Data\nprintcool(\"copy molecule \\n Note that the copy from options is recommended since it properly creates new coord_obj and PES object\")\nnewMolecule = Molecule.copy_from_options(M)\n",
"#============================================================================================================#\n#| \u001b[92m copy molecule \u001b[0m |#\n#| \u001b[92m Note that the copy from options is recommended since it properly creates new coord_obj and PES object \u001b[0m |#\n#============================================================================================================#\n initializing LOT from file\n setting primitives from options!\n getting cartesian coordinates from geom\n getting coord_object from options\n"
],
[
"print(newMolecule)",
" molecule object\nOption:\n Key: fnm\n Value: data/ethylene.xyz\n Required: False\n Allowed Types: [<type 'str'>]\n Allowed Values: None\n Doc: File name to create the Molecule object from. Only used if geom is none.\n\nOption:\n Key: ftype\n Value: xyz\n Required: False\n Allowed Types: [<type 'str'>]\n Allowed Values: None\n Doc: filetype (optional) will attempt to read filetype if not given\n\nOption:\n Key: coordinate_type\n Value: TRIC\n Required: False\n Allowed Types: None\n Allowed Values: ['Cartesian', 'DLC', 'HDLC', 'TRIC']\n Doc: The type of coordinate system to build\n\nOption:\n Key: coord_obj\n Value: Internal coordinate system (atoms numbered from 1):\nDistance 1-2\nDistance 1-3\nDistance 1-4\nDistance 4-5\nDistance 4-6\nAngle 2-1-4\nAngle 3-1-4\nAngle 1-4-6\nAngle 5-4-6\nOut-of-Plane 1-2-3-4\nOut-of-Plane 4-1-5-6\nDihedral 2-1-4-5\nDihedral 2-1-4-6\nDihedral 3-1-4-5\nDihedral 3-1-4-6\nTranslation-X 1-6\nTranslation-Y 1-6\nTranslation-Z 1-6\nRotation-A 1-6\nRotation-B 1-6\nRotation-C 1-6\n<class 'slots.Distance'> : 5\n<class 'slots.Angle'> : 4\n<class 'slots.OutOfPlane'> : 2\n<class 'slots.Dihedral'> : 4\n<class 'slots.TranslationX'> : 1\n<class 'slots.TranslationY'> : 1\n<class 'slots.TranslationZ'> : 1\n<class 'slots.RotationA'> : 1\n<class 'slots.RotationB'> : 1\n<class 'slots.RotationC'> : 1\n Required: False\n Allowed Types: [<class 'dlc_new.DelocalizedInternalCoordinates'>, <class 'cartesian.CartesianCoordinates'>]\n Allowed Values: None\n Doc: A coordinate object.\n\nOption:\n Key: geom\n Value: [('C', -0.0068897336218653165, 0.69925693300335223, -0.093863468038289627), ('H', -0.93045168401614453, 1.2734400127931729, -0.093859468038289581), ('H', 0.91667231598385546, 1.2734398532135316, -0.093859468038289581), ('C', -0.0068898486044534522, -0.63165906699664776, -0.093863468038289641), ('H', 0.91667210178982583, -1.2058421467864684, -0.093859468038289651), ('H', -0.93045189821017416, -1.2058419872068269, -0.093859468038289651)]\n Required: 
False\n Allowed Types: [<type 'list'>]\n Allowed Values: None\n Doc: geometry including atomic symbols\n\nOption:\n Key: xyz\n Value: [[-0.0069 0.6993 -0.0939]\n [-0.9305 1.2734 -0.0939]\n [ 0.9167 1.2734 -0.0939]\n [-0.0069 -0.6317 -0.0939]\n [ 0.9167 -1.2058 -0.0939]\n [-0.9305 -1.2058 -0.0939]]\n Required: False\n Allowed Types: [<type 'numpy.ndarray'>]\n Allowed Values: None\n Doc: The Cartesian coordinates in Angstrom\n\nOption:\n Key: PES\n Value: <avg_pes.Avg_PES object at 0x7f428edc5f90>\n Required: True\n Allowed Types: [<class 'pes.PES'>, <class 'avg_pes.Avg_PES'>, <class 'penalty_pes.Penalty_PES'>]\n Allowed Values: None\n Doc: potential energy surface object to evaulate energies, gradients, etc. Pes is defined by charge, state, multiplicity,etc. \n\nOption:\n Key: Primitive_Hessian\n Value: [[ 0.35 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0.35 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0.35 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0.35 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0.35 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0.16 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0.16 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0.16 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0.16 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.045 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.045\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0.023 0. 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0.023 0. 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0.023 0. 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0.023 0. 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0.05 0. 0. 0. 0. 0. ]\n [ 0. 0. 0. 
0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0.05 0. 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0.05 0. 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0.05 0. 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0.05 0. ]\n [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.05 ]]\n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: Primitive hessian save file for doing optimization.\n\nOption:\n Key: Hessian\n Value: [[ 0.05 0. -0. -0. -0. -0. -0. -0. 0. 0.\n -0. 0. -0. 0. -0. 0. 0. -0. ]\n [ 0. 0.05 0. -0. 0. -0. 0. 0. -0. -0.\n 0. -0. 0. 0. 0. -0. -0. 0. ]\n [-0. 0. 0.05 -0. -0. 0. 0. -0. -0. -0.\n -0. 0. 0. 0. 0. 0. 0. -0. ]\n [-0. -0. -0. 0.05 -0. 0. -0. 0. -0. 0.\n 0. 0. 0. -0. 0. -0. 0. -0. ]\n [-0. 0. -0. -0. 0.05 0. 0. 0. 0. 0.\n -0. 0. 0. -0. 0. -0. 0. 0. ]\n [-0. -0. 0. 0. 0. 0.2612 0. 0.0586 0.0378\n -0.0102 -0.0137 0.0247 0.0387 -0. 0.0408 -0.0079 -0. 0. ]\n [-0. 0. 0. -0. 0. 0. 0.05 0. 0. 0.\n -0. -0. 0. 0. 0. -0. 0. -0. ]\n [-0. 0. -0. 0. 0. 0.0586 0. 0.2269 0.0258\n 0.0235 -0.0181 -0.0479 0.0197 -0. 0.0138 -0.0196 0. -0. ]\n [ 0. -0. -0. -0. 0. 0.0378 0. 0.0258 0.2917\n 0.0003 0.0086 0.0131 -0.0719 0. -0.0129 0.0033 -0. -0. ]\n [ 0. -0. -0. 0. 0. -0.0102 0. 0.0235 0.0003\n 0.3023 0.0082 0.01 0.0119 0. -0.0469 -0.0602 -0. 0. ]\n [-0. 0. -0. 0. -0. -0.0137 -0. -0.0181 0.0086\n 0.0082 0.3234 -0.0009 -0.0119 -0. 0.0509 -0.031 0. -0. ]\n [ 0. -0. 0. 0. 0. 0.0247 -0. -0.0479 0.0131\n 0.01 -0.0009 0.3292 0.0171 0. -0.0051 0.0045 0. -0. ]\n [-0. 0. 0. 0. 0. 0.0387 0. 0.0197 -0.0719\n 0.0119 -0.0119 0.0171 0.2411 -0. 0.0311 -0.0156 -0. -0. ]\n [ 0. 0. 0. -0. -0. -0. 0. -0. 0. 0.\n -0. 0. -0. 0.023 -0. -0. -0. 0. ]\n [-0. 0. 0. 0. 0. 0.0408 0. 0.0138 -0.0129\n -0.0469 0.0509 -0.0051 0.0311 -0. 0.2201 0.001 0. -0. ]\n [ 0. -0. 0. -0. -0. -0.0079 -0. -0.0196 0.0033\n -0.0602 -0.031 0.0045 -0.0156 -0. 0.001 0.1941 -0. -0. ]\n [ 0. -0. 0. 0. 0. -0. 0. 0. -0. -0.\n 0. 0. -0. 
-0. 0. -0. 0.0282 0. ]\n [-0. 0. -0. -0. 0. 0. -0. -0. -0. 0.\n -0. -0. -0. 0. -0. -0. 0. 0.0282]]\n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: Hessian save file in the basis of coordinate_type.\n\nOption:\n Key: comment\n Value: \n Required: False\n Allowed Types: None\n Allowed Values: None\n Doc: A string that is saved on the molecule, used for descriptive purposes\n\n\n"
],
[
"printcool(\"copy molecule with new geom\")\nnewxyz2 = np.ones(M.xyz.shape)\nnewxyz2 += M.xyz\nnewMolecule2 = Molecule.copy_from_options(M,xyz=newxyz2)\nprint(newMolecule2.xyz)\n",
"#========================================================#\n#| \u001b[92m copy molecule with new geom \u001b[0m |#\n#========================================================#\n initializing LOT from file\n setting primitives from options!\n getting cartesian coordinates from geom\n getting coord_object from options\n[[ 0.9931 1.6993 0.9061]\n [ 0.0695 2.2734 0.9061]\n [ 1.9167 2.2734 0.9061]\n [ 0.9931 0.3683 0.9061]\n [ 1.9167 -0.2058 0.9061]\n [ 0.0695 -0.2058 0.9061]]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7998ab2a40bc9e4817d145e8933fbf02888e1e6 | 12,916 | ipynb | Jupyter Notebook | Assignment - 1 ( Day 2 ).ipynb | jsharmi18421245/LetsUpgrade-pyhton | fe9e9acf5768abdf643ae3f6a0b7386cd249e7d2 | [
"MIT"
] | 1 | 2020-09-13T08:24:48.000Z | 2020-09-13T08:24:48.000Z | Assignment - 1 ( Day 2 ).ipynb | jsharmi18421245/LetsUpgrade-pyhton | fe9e9acf5768abdf643ae3f6a0b7386cd249e7d2 | [
"MIT"
] | null | null | null | Assignment - 1 ( Day 2 ).ipynb | jsharmi18421245/LetsUpgrade-pyhton | fe9e9acf5768abdf643ae3f6a0b7386cd249e7d2 | [
"MIT"
] | null | null | null | 18.268741 | 73 | 0.438913 | [
[
[
"# Day 2 Assignment - 1\n",
"_____no_output_____"
]
],
[
[
"Q.No.1 # List and its default functions",
"_____no_output_____"
],
[
"# (i) Add list element as value of list\n# append():\nlist = [\"Jansi\",\"Jessie\",\"Sheela\",\"Thabita\"]\nlist.append(\"jabamalar\")\nprint(list)",
"['Jansi', 'Jessie', 'Sheela', 'Thabita', 'jabamalar']\n"
],
[
"# (ii) Insert():\n# Insert at index value 1990\nlist.insert(1969,1990)\nprint(list)",
"['Jansi', 'Jessie', 'Sheela', 'Thabita', 'jabamalar', 1990]\n"
],
[
"# (iii) extend():\nList1 = [12, 23, 34, 45] \n\nList2 = [34, 45, 56, 67, 78] \n\n \n# Add List2 to List1 \nList1.extend(List2) \n\nprint(List1) \n\n \n# Add List1 to List2 now \nList2.extend(List1) \n\nprint(List2) \n",
"[12, 23, 34, 45, 34, 45, 56, 67, 78]\n[34, 45, 56, 67, 78, 12, 23, 34, 45, 34, 45, 56, 67, 78]\n"
],
[
"# (iv) count():\nList = [12, 23, 45, 34, 45, 34, 45, 56, 67, 78, 45, 89, 90, 45] \n\nprint(List.count(45)) ",
"5\n"
],
[
"# (v) Sum:\nList = [12, 23, 34, 45,] \n\nprint(sum(List))",
"114\n"
],
[
"Q.No.2 # Dictionary and its default funtions\n",
"_____no_output_____"
],
[
"# (i) Creating a Dictionary \n# with Integer Keys \nDict = {1: 'Apple', 2: 'Orange', 3: 'Mangoes'} \nprint(Dict) ",
"{1: 'Apple', 2: 'Orange', 3: 'Mangoes'}\n"
],
[
"# (ii) Creating a Dictionary \n# with Mixed keys \nDict = {'Name': 'Jansi', 1: [100, 101, 102]} \nprint(Dict) ",
"{'Name': 'Jansi', 1: [100, 101, 102]}\n"
],
[
"# (iii) Creating an empty Dictionary \nDict = {} \nprint(Dict) ",
"{}\n"
],
[
"# (iv) Creating a Dictionary \n# with dict() method \nDict = dict({1: 'Apple', 2: 'Orange', 3:'Mangoes'}) \nprint(Dict) ",
"{1: 'Apple', 2: 'Orange', 3: 'Mangoes'}\n"
],
[
"# (v) Creating a Dictionary \n# with each item as a Pair \nDict = dict([(1, 'Apple'), (2, 'Mangoes')]) \nprint(Dict) ",
"{1: 'Apple', 2: 'Mangoes'}\n"
],
[
"Q.No.3 # Sets and its default functions",
"_____no_output_____"
],
[
"# (i) Creating a Set \nset1 = set() \nprint(set1) ",
"set()\n"
],
[
"# (ii) Creating a Set with \n# the use of a String \nset1 = set(\"Sneha\") \nprint(set1) ",
"{'e', 'a', 'h', 'S', 'n'}\n"
],
[
"# (iii) Creating a Set with \n# the use of Constructor \n# (Using object to Store String) \nString = 'God is Great'\nset1 = set(String) \nprint(set1) ",
"{'e', 'G', 'a', 'o', 'r', 'd', 's', ' ', 't', 'i'}\n"
],
[
"# (iv) Creating a Set with \n# the use of a List \nset1 = set([\"God\", \"is\", \"Great\"]) \nprint(set1)",
"{'is', 'Great', 'God'}\n"
],
[
"# (v) Creating a Set with \n# a List of Numbers \n# (Having duplicate values) \nset1 = set([1, 2, 4, 4, 3, 3, 3, 6, 5]) \nprint(set1) ",
"{1, 2, 3, 4, 5, 6}\n"
],
[
"Q.No.4 Tuple and its default funtions",
"_____no_output_____"
],
[
"# (i) Creating an empty Tuple \nTuple1 = () \nprint (Tuple1) ",
"()\n"
],
[
"# (ii) Creatting a Tuple \n#with the use of string \nTuple1 = ('Wing', 'Star') \nprint(Tuple1) ",
"('Wing', 'Star')\n"
],
[
"# (iii) Creating a Tuple with \n# the use of list \nlist1 = [11, 22, 33, 44, 55] \nprint(tuple(list1)) ",
"(11, 22, 33, 44, 55)\n"
],
[
"# (iv) Creating a Tuple \n#with the use of built-in function \nTuple1 = tuple('Jansi') \nprint(Tuple1) ",
"('J', 'a', 'n', 's', 'i')\n"
],
[
"# (v) Creating a Tuple \n#with Mixed Datatype \nTuple1 = (12, 'Maths', 56, 'Science') \nprint(Tuple1) ",
"(12, 'Maths', 56, 'Science')\n"
],
[
"Q.No.5 # String and its default functions",
"_____no_output_____"
],
[
"# (i) Creating a Tuple \n#with Mixed Datatype \nTuple1 = (90, 'Sneha', 79, 'Sharmila') \nprint(Tuple1) ",
"(90, 'Sneha', 79, 'Sharmila')\n"
],
[
"# (ii) Creating a String \n# with double Quotes \nString1 = \" I love python \"\nprint(String1) ",
" I love python \n"
],
[
"# (iii) Creating a String \n# with triple Quotes \nString1 = '''I Want to learn python'''\nprint(String1) ",
"I Want to learn python\n"
],
[
"# (iv) Creating String with triple \n# Quotes allows multiple lines \nString1 = '''Enjoy \n the \n Life'''\nprint(String1) ",
"Enjoy \n the \n Life\n"
],
[
"# (v) Creating a string\nString1 = \"Javascript\"\nprint(String1) ",
"Javascript\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7999f245ded5d8225974b3142a6228331c94c64 | 21,530 | ipynb | Jupyter Notebook | examples/basic/flopy3boundaries.ipynb | kwilcox/flopy | 527c4ee452ea779bdebd6c1c540452d145e26943 | [
"BSD-3-Clause"
] | null | null | null | examples/basic/flopy3boundaries.ipynb | kwilcox/flopy | 527c4ee452ea779bdebd6c1c540452d145e26943 | [
"BSD-3-Clause"
] | null | null | null | examples/basic/flopy3boundaries.ipynb | kwilcox/flopy | 527c4ee452ea779bdebd6c1c540452d145e26943 | [
"BSD-3-Clause"
] | null | null | null | 34.338118 | 497 | 0.500232 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e799a2f80d3ae0a1653a964f30049d90e09520b0 | 603,033 | ipynb | Jupyter Notebook | Packages/Get_Data_From_OSM/Get_open_street_map_lines.ipynb | ryan0124/ACEP_Capstone_Project | a58044be3d3eb00663a8eaf97b89c1d39de73a3e | [
"MIT"
] | null | null | null | Packages/Get_Data_From_OSM/Get_open_street_map_lines.ipynb | ryan0124/ACEP_Capstone_Project | a58044be3d3eb00663a8eaf97b89c1d39de73a3e | [
"MIT"
] | 2 | 2022-03-08T19:38:52.000Z | 2022-03-16T23:39:37.000Z | Packages/Get_Data_From_OSM/Get_open_street_map_lines.ipynb | ryan0124/ACEP_Capstone_Project | a58044be3d3eb00663a8eaf97b89c1d39de73a3e | [
"MIT"
] | null | null | null | 55.800222 | 13,845 | 0.59339 | [
[
[
"# Open Street Map Buildings Information\n\nThis notebook demonstrates downloading data from Open Street Map to fill gaps in the [Global Electricity Transmission And Distribution Lines](https://datacatalog.worldbank.org/dataset/derived-map-global-electricity-transmission-and-distribution-lines) (GETD) dataset.\n\nTo obtain GIS data from Open Street Map of any specified administrative area in the world, the [GeoFabrik](http://download.geofabrik.de/) download server is the easiest solution for querying data at the administrative level.\n\nIn this notebook, we address obtaining data for areas of Alaska\n\nWe will download the latest version of this data (currently 12/20/21) and extract the commercial, industrial and residential buildings information.\n\n\n#Resources\n* https://wiki.openstreetmap.org/wiki/Map_features#Power\n* https://dlr-ve-esy.gitlab.io/esy-osmfilter/main.html\n* https://stackoverflow.com/questions/66367195/get-all-ways-by-using-esy-osmfilter",
"_____no_output_____"
],
[
"### Mount Drive folder\n\nMount Drive folder for saving this data.",
"_____no_output_____"
]
],
[
[
"from google.colab import drive\ndrive.mount('/content/drive')",
"Mounted at /content/drive\n"
]
],
[
[
"### Get packages\n\nInstall packages needed for analysis and import into workspace.",
"_____no_output_____"
]
],
[
[
"!pip install esy-osmfilter # gives tags and filters to open street map data\n!pip install geopandas #to make working with geospatial data in python easier\n%load_ext autoreload\n%autoreload 2",
"Collecting esy-osmfilter\n Downloading esy_osmfilter-1.0.11-py3-none-any.whl (30 kB)\nRequirement already satisfied: protobuf<4,>=3 in /usr/local/lib/python3.7/dist-packages (from esy-osmfilter) (3.17.3)\nCollecting esy-osm-pbf>=0\n Downloading esy_osm_pbf-0.0.1-py3-none-any.whl (10 kB)\nRequirement already satisfied: six>=1.9 in /usr/local/lib/python3.7/dist-packages (from protobuf<4,>=3->esy-osmfilter) (1.15.0)\nInstalling collected packages: esy-osm-pbf, esy-osmfilter\nSuccessfully installed esy-osm-pbf-0.0.1 esy-osmfilter-1.0.11\nCollecting geopandas\n Downloading geopandas-0.10.2-py2.py3-none-any.whl (1.0 MB)\n\u001b[K |████████████████████████████████| 1.0 MB 9.7 MB/s \n\u001b[?25hCollecting pyproj>=2.2.0\n Downloading pyproj-3.2.1-cp37-cp37m-manylinux2010_x86_64.whl (6.3 MB)\n\u001b[K |████████████████████████████████| 6.3 MB 12.4 MB/s \n\u001b[?25hRequirement already satisfied: shapely>=1.6 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.8.1.post1)\nCollecting fiona>=1.8\n Downloading Fiona-1.8.21-cp37-cp37m-manylinux2014_x86_64.whl (16.7 MB)\n\u001b[K |████████████████████████████████| 16.7 MB 192 kB/s \n\u001b[?25hRequirement already satisfied: pandas>=0.25.0 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.3.5)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (57.4.0)\nRequirement already satisfied: click>=4.0 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (7.1.2)\nCollecting click-plugins>=1.0\n Downloading click_plugins-1.1.1-py2.py3-none-any.whl (7.5 kB)\nRequirement already satisfied: six>=1.7 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (1.15.0)\nCollecting munch\n Downloading munch-2.5.0-py2.py3-none-any.whl (10 kB)\nRequirement already satisfied: attrs>=17 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (21.4.0)\nRequirement already satisfied: certifi in 
/usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (2021.10.8)\nCollecting cligj>=0.5\n Downloading cligj-0.7.2-py3-none-any.whl (7.1 kB)\nRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (1.21.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (2018.9)\nInstalling collected packages: munch, cligj, click-plugins, pyproj, fiona, geopandas\nSuccessfully installed click-plugins-1.1.1 cligj-0.7.2 fiona-1.8.21 geopandas-0.10.2 munch-2.5.0 pyproj-3.2.1\n"
],
[
"import configparser, contextlib\nimport os, sys\nimport geopandas as gpd\nimport pandas as pd\nfrom esy.osmfilter import osm_colors as CC\nfrom esy.osmfilter import run_filter \nfrom esy.osmfilter import Node, Way, Relation\nfrom esy.osmfilter import export_geojson\n%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
]
],
[
[
"### To Downlaod the main pbf file (No need to run)",
"_____no_output_____"
]
],
[
[
"## NO NEED TO RUN\n!wget http://download.geofabrik.de/north-america/us/alaska-latest.osm.pbf -P '/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'",
"--2022-03-09 04:44:44-- http://download.geofabrik.de/north-america/us/alaska-latest.osm.pbf\nResolving download.geofabrik.de (download.geofabrik.de)... 95.216.28.113, 116.202.112.212\nConnecting to download.geofabrik.de (download.geofabrik.de)|95.216.28.113|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 118348072 (113M) [application/octet-stream]\nSaving to: ‘/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-latest.osm.pbf.4’\n\nalaska-latest.osm.p 100%[===================>] 112.87M 19.8MB/s in 6.9s \n\n2022-03-09 04:44:51 (16.4 MB/s) - ‘/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-latest.osm.pbf.4’ saved [118348072/118348072]\n\n"
]
],
[
[
"# Function to download different types of buildings\n\n",
"_____no_output_____"
]
],
[
[
"# Getting residential buildings\ndef get_res_buildings(area_name): \n \n # Set input/output locations\n PBF_inputfile = os.path.join(os.getcwd(), '/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-latest.osm.pbf')\n JSON_outputfile = os.path.join(os.getcwd(),'/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-res_buildings.json')\n \n # Pre-filter for all residential buildings\n prefilter={Node: {}, Way: {\"building\":[\"apartments\",\"bungalow\",\"cabin\",\"detached\",\"dormitory\",\"farm\",\n \"hotel\",\"house\",\"residential\",\"church\",\"garage\",\"garages\",],\n \"tourism\":[\"alpine_hut\",\"apartment\",\"hostel\",\"hotel\",\"motel\",\n ],\n \"addr\":[True,],}, Relation: {}}\n whitefilter = []\n blackfilter = []\n \n # Create initial data\n [Data, _]=run_filter('noname',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter,\n blackfilter,\n NewPreFilterData=True, \n CreateElements=False, \n LoadElements=False,\n verbose=True) \n # # Check that data exists\n print(len(Data['Way']))\n \n # # Get residential buildings elements\n whitefilter=[((\"building\",\"apartments\"),), ((\"building\",\"bungalow\"),),((\"building\",\"cabin\"),),\n ((\"building\",\"detached\"),), ((\"building\",\"dormitory\"),),((\"building\",\"farm\"),),\n ((\"building\",\"hotel\"),), ((\"building\",\"house\"),),((\"building\",\"residential\"),),\n ((\"building\",\"church\"),), ((\"building\",\"garage\"),),((\"building\",\"garages\"),),\n ((\"tourism\",\"alpine_hut\"),), ((\"tourism\",\"apartment\"),),((\"tourism\",\"hostel\"),),\n ((\"tourism\",\"hotel\"),), ((\"tourism\",\"motel\"),),((\"addr\",True),),\n ]\n blackfilter=[((),),]\n \n # Apply filter\n [Data, Elements]=run_filter('powerlines',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter, \n blackfilter, \n NewPreFilterData=False, \n CreateElements=True, \n LoadElements=False,\n verbose=True)\n \n print(len(Data['Way']))\n\n \n # Export data to 
geojson\n export_geojson(Elements['powerlines']['Way'],Data, filename='/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-res_buildings.geojson',jsontype='Line')\n \n # Read data into geopandas\n gdf = gpd.read_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-res_buildings.geojson')\n \n # Write as shapefile\n gdf.to_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-res_buildings.shp')\n \n #Plot output\n gdf.plot()\n",
"_____no_output_____"
],
[
"# Getting commercial buildings\ndef get_com_buildings(area_name): \n \n # Set input/output locations\n PBF_inputfile = os.path.join(os.getcwd(), '/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-latest.osm.pbf')\n JSON_outputfile = os.path.join(os.getcwd(),'/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-com_buildings.json')\n \n # Pre-filter for all commercial buildings\n prefilter={Node: {\"amenity\":[\"charging_staion\",\"atm\",\"strip_club\",],}, Way: {\"aeroway\":[\"terminal\",],\"amenity\":[\"bar\",\"cafe\",\"fast_food\",\"food_court\",\"pub\",\"restaurant\",\"college\",\"kindergarten\",\n \"library\",\"school\",\"university\",\"bus_station\",\"car_rental\",\"car_wash\",\n \"fuel\",\"bank\",\"clinic\",\"dentist\",\n \"doctor\",\"hospital\",\"nursing_home\",\"pharmacy\",\"socail_facility\",\n \"veterinary\",\"brothel\",\"casino\",\"cinema\",\"community_centre\",\n \"conference_centre\",\"gambling\",\"love_hotel\",\"night_club\",\n \"planetarium\",\"studio\",\"swingerclub\",\n \"theatre\",\"courthouse\",\"embassy\",\"fire_station\",\n \"police\",\"post_office\",\"prison\",\"townhall\",\n \"creamatorium\",\"internet_cafe\",\"place_of_worship\",],\n \"building\":[\"commerical\",\"office\",\"retail\",\"supermarket\",\"warehouse\",\"bakehouse\",\"civic\",\"college\",\"fire_station\",\n \"government\",\"hospital\",\"kindergarten\",\"public\",\"school\",\"train_staion\",\"transportaion\",\n \"university\",\"stadium\",\"parking\",],\n \"leisure\":[\"adult_gaming_centre\",\"amusement_arcade\",\"beach_resort\",\"fitness_centre\",\n \"sports_centre\",\"stadium\",\"summer_camp\",],\n \"office\":[\"accountant\",\"association\",\"charity\",\"company\",\"comsulting\",\"courier\",\n \"dipomatic\",\"educational_institution\",\"employment_agency\",\"engineer\",\n \"estate_agent\",\"financial\",\"forestry\",\"foundation\",\"government\",\"insurance\",\n 
\"lawyer\",\"logistics\",\"moving_company\",\"ngo\",\"political_party\",\n \"property_management\",\"religion\",\"research\",\"security\",\"tax_advisor\",\"telecommunication\",\n \"union\",\"water_utility\",\"yes\"],\n \"shop\":[True],\n \"tourism\":[\"aquarium\",\"gallery\",\"museum\",\"zoo\",\"yes\"],\n \"addr\":[True,],}, Relation: {}}\n whitefilter = []\n blackfilter = []\n \n # Create initial data\n [Data, _]=run_filter('noname',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter,\n blackfilter,\n NewPreFilterData=True, \n CreateElements=False, \n LoadElements=False,\n verbose=True) \n # # Check that data exists\n print(len(Data['Node']))\n \n # # Get commercial buildings elements\n whitefilter=[((\"aeroway\",\"terminal\"),), ((\"amenity\",\"bar\"),), ((\"amenity\",\"cafe\"),), ((\"amenity\",\"fast_food\"),), ((\"amenity\",\"food_court\"),), ((\"amenity\",\"pub\"),), ((\"amenity\",\"restaurant\"),), \n ((\"amenity\",\"college\"),), ((\"amenity\",\"kindergarten\"),), ((\"amenity\",\"library\"),), ((\"amenity\",\"school\"),), ((\"amenity\",\"university\"),), ((\"amenity\",\"bus_station\"),),\n ((\"amenity\",\"car_rental\"),), ((\"amenity\",\"car_wash\"),), ((\"amenity\",\"fuel\"),), ((\"amenity\",\"bank\"),), ((\"amenity\",\"clinic\"),), ((\"amenity\",\"dentist\"),), ((\"amenity\",\"doctor\"),),\n ((\"amenity\",\"hospital\"),), ((\"amenity\",\"nursing_home\"),), ((\"amenity\",\"pharmacy\"),), ((\"amenity\",\"socail_facility\"),), ((\"amenity\",\"veterinary\"),), ((\"amenity\",\"brothel\"),), \n ((\"amenity\",\"casino\"),), ((\"amenity\",\"cinema\"),), ((\"amenity\",\"community_centre\"),), ((\"amenity\",\"conference_centre\"),), ((\"amenity\",\"gambling\"),), ((\"amenity\",\"love_hotel\"),),\n ((\"amenity\",\"night_club\"),), ((\"amenity\",\"planetarium\"),), ((\"amenity\",\"studio\"),), ((\"amenity\",\"swingerclub\"),), ((\"amenity\",\"theatre\"),), ((\"amenity\",\"courthouse\"),),\n ((\"amenity\",\"embassy\"),), ((\"amenity\",\"fire_station\"),), 
((\"amenity\",\"police\"),), ((\"amenity\",\"post_office\"),), ((\"amenity\",\"prison\"),), ((\"amenity\",\"townhall\"),), ((\"amenity\",\"creamatorium\"),),\n ((\"amenity\",\"internet_cafe\"),), ((\"amenity\",\"place_of_worship\"),), ((\"building\",\"commerical\"),), ((\"building\",\"office\"),), ((\"building\",\"retail\"),), ((\"building\",\"supermarket\"),),\n ((\"building\",\"warehouse\"),), ((\"building\",\"bakehouse\"),), ((\"building\",\"civic\"),), ((\"building\",\"college\"),), ((\"building\",\"fire_station\"),), ((\"building\",\"government\"),),\n ((\"building\",\"hospital\"),), ((\"building\",\"kindergarten\"),), ((\"building\",\"public\"),), ((\"building\",\"school\"),), ((\"building\",\"train_staion\"),), ((\"building\",\"transportaion\"),),\n ((\"building\",\"university\"),), ((\"building\",\"stadium\"),), ((\"building\",\"parking\"),), ((\"leisure\",\"adult_gaming_centre\"),), ((\"leisure\",\"amusement_arcade\"),), ((\"leisure\",\"beach_resort\"),),\n ((\"leisure\",\"amusement_arcade\"),), ((\"leisure\",\"amusement_arcade\"),), ((\"leisure\",\"amusement_arcade\"),), ((\"leisure\",\"amusement_arcade\"),), ((\"leisure\",\"amusement_arcade\"),),\n ((\"leisure\",\"fitness_centre\"),), ((\"leisure\",\"sports_centre\"),), ((\"leisure\",\"stadium\"),), ((\"leisure\",\"summer_camp\"),), ((\"office\",\"accountant\"),), ((\"office\",\"association\"),),\n ((\"office\",\"charity\"),), ((\"office\",\"company\"),), ((\"office\",\"comsulting\"),), ((\"office\",\"courier\"),), ((\"office\",\"dipomatic\"),), ((\"office\",\"educational_institution\"),),\n ((\"office\",\"employment_agency\"),), ((\"office\",\"engineer\"),), ((\"office\",\"estate_agent\"),), ((\"office\",\"financial\"),), ((\"office\",\"forestry\"),), ((\"office\",\"foundation\"),),\n ((\"office\",\"government\"),), ((\"office\",\"insurance\"),), ((\"office\",\"lawyer\"),), ((\"office\",\"logistics\"),), ((\"office\",\"moving_company\"),), ((\"office\",\"ngo\"),),\n ((\"office\",\"political_party\"),), 
((\"office\",\"property_management\"),), ((\"office\",\"religion\"),), ((\"office\",\"research\"),), ((\"office\",\"security\"),), ((\"office\",\"tax_advisor\"),),\n ((\"office\",\"telecommunication\"),), ((\"office\",\"union\"),), ((\"office\",\"water_utility\"),), ((\"office\",\"yes\"),), ((\"shop\",True),), ((\"tourism\",\"aquarium\"),), ((\"tourism\",\"gallery\"),),\n ((\"tourism\",\"museum\"),), ((\"tourism\",\"yes\"),), ((\"addr\",True),),\n ]\n blackfilter=[((),),]\n \n # Apply filter\n [Data, Elements]=run_filter('powerlines',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter, \n blackfilter, \n NewPreFilterData=False, \n CreateElements=True, \n LoadElements=False,\n verbose=True)\n \n print(len(Data['Node']))\n\n # Export data to geojson\n export_geojson(Elements['powerlines']['Way'],Data, filename='/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-com_buildings.geojson',jsontype='Line')\n \n # Read data into geopandas\n gdf = gpd.read_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-com_buildings.geojson')\n \n # Write as shapefile\n gdf.to_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-com_buildings.shp')\n \n #Plot output\n gdf.plot()\n",
"_____no_output_____"
],
[
"# Getting industrial buildings\ndef get_ind_buildings(area_name): \n \n # Set input/output locations\n PBF_inputfile = os.path.join(os.getcwd(), '/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-latest.osm.pbf')\n JSON_outputfile = os.path.join(os.getcwd(),'/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-ind_buildings.json')\n \n# Pre-filter for all industrial buildings\n prefilter={Node: {}, Way: {\"building\":[\"industrial\",\"digestor\",\"service\",\"transformer_tower\",\"water_tower\",\n \"military\",],\n \"historic\":[\"creamery\",],\n \"man_made\":[\"lighthouse\",\"monitoring_station\",\"observatory\",\n \"pumping_staition\",\"wastewater_plant\",\"water_works\",\n \"works\",],\n \"military\":[\"base\",\"barracks\",],\n \"public_transport\":[\"station\",],\n \"railway\":[\"station\",],\n \"telecom\":[\"data_center\",],\n \"water\":[\"wastewater\",],\n \"waterway\":[\"dock\",\"boatyard\",],\n \"addr\":[True,],\n }, Relation: {}}\n whitefilter = []\n blackfilter = []\n \n # Create initial data\n [Data, _]=run_filter('noname',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter,\n blackfilter,\n NewPreFilterData=True, \n CreateElements=False, \n LoadElements=False,\n verbose=True) \n # # Check that data exists\n print(len(Data['Node']))\n \n # Get industrial buildings elements\n whitefilter=[((\"building\",\"industrial\"),), ((\"building\",\"digestor\"),), ((\"building\",\"service\"),), ((\"building\",\"transformer_tower\"),), ((\"building\",\"water_tower\"),), ((\"building\",\"military\"),),\n ((\"historic\",\"creamery\"),), ((\"military\",\"barracks\"),), ((\"military\",\"base\"),), ((\"public_transport\",\"station\"),), ((\"railway\",\"station\"),), ((\"telecom\",\"data_center\"),), \n ((\"water\",\"wastewater\"),), ((\"waterway\",\"dock\"),), ((\"waterway\",\"boatyard\"),), ((\"addr\",True),), ((\"man_made\",\"lighthouse\"),), ((\"man_made\",\"monitoring_station\"),),\n 
((\"man_made\",\"observatory\"),), ((\"man_made\",\"wastewater_plant\"),), ((\"man_made\",\"pumping_staition\"),), ((\"man_made\",\"water_works\"),), ((\"man_made\",\"works\"),),\n ]\n blackfilter=[((),),]\n \n # Apply filter\n [Data, Elements]=run_filter('powerlines',\n PBF_inputfile, \n JSON_outputfile, \n prefilter,\n whitefilter, \n blackfilter, \n NewPreFilterData=False, \n CreateElements=True, \n LoadElements=False,\n verbose=True)\n \n print(len(Data['Node']))\n #data = Data['Way']\n #df = pd.DataFrame(data)\n #return (df)\n\n #return (Data['Way'])\n\n # Export data to geojson\n export_geojson(Elements['powerlines']['Way'],Data, filename='/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-ind_buildings.geojson',jsontype='Line')\n \n # Read data into geopandas\n gdf = gpd.read_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-ind_buildings.geojson')\n \n # Write as shapefile\n gdf.to_file('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/'+area_name+'-ind_buildings.shp')\n \n #Plot output\n gdf.plot()",
"_____no_output_____"
],
[
"# commercial buildings data\nget_com_buildings('alaska')",
"INFO:esy.osmfilter.pre_filter:\u001b[30m\u001b[47mPreFilter OSM GAS DATA\u001b[0m\nINFO:esy.osmfilter.pre_filter:InputFile : \u001b[36m/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-latest.osm.pbf\u001b[0m\nINFO:esy.osmfilter.pre_filter:Size : 118070 kbyte\nINFO:esy.osmfilter.pre_filter:Estimated Time: 16.87 s\nINFO:esy.osmfilter.pre_filter:=============================\nINFO:esy.osmfilter.pre_filter:0.5\nINFO:esy.osmfilter.pre_filter:1\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.3\nINFO:esy.osmfilter.pre_filter:2\nINFO:esy.osmfilter.pre_filter:3\nINFO:esy.osmfilter.pre_filter:Outputfile : \u001b[36m/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-com_buildings.json\u001b[0m\nSize : 10184 kbyte \nTime Elapsed : 203.74 s\n\n\n"
],
[
"# industrial buildings data\nget_ind_buildings('alaska')",
"INFO:esy.osmfilter.pre_filter:\u001b[30m\u001b[47mPreFilter OSM GAS DATA\u001b[0m\nINFO:esy.osmfilter.pre_filter:InputFile : \u001b[36m/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-latest.osm.pbf\u001b[0m\nINFO:esy.osmfilter.pre_filter:Size : 118070 kbyte\nINFO:esy.osmfilter.pre_filter:Estimated Time: 16.87 s\nINFO:esy.osmfilter.pre_filter:=============================\nINFO:esy.osmfilter.pre_filter:0.5\nINFO:esy.osmfilter.pre_filter:1\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.3\nINFO:esy.osmfilter.pre_filter:2\nINFO:esy.osmfilter.pre_filter:3\nINFO:esy.osmfilter.pre_filter:Outputfile : \u001b[36m/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-ind_buildings.json\u001b[0m\nSize : 1426 kbyte \nTime Elapsed : 175.83 s\n\n\n"
],
[
"# residential buildings data\nget_res_buildings('alaska')",
"\u001b[1;30;43mStreaming output truncated to the last 5000 lines.\u001b[0m\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmf
ilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter
.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_
filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filte
r:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2
\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINF
O:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy
.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.2\nINFO:esy.osmfilter.pre_filter:1.3\nINFO:esy.osmfilter.pre_filter:2\nINFO:esy.osmfilter.pre_filter:3\nINFO:esy.osmfilter.pre_filter:Outputfile : \u001b[36m/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-res_buildings.json\u001b[0m\nSize : 22623 kbyte \nTime Elapsed : 241.51 s\n\n\n"
],
[
"import pandas as pd\n#Check the size of the data\nres_data=pd.read_json('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-res_buildings.json')\nlen(res_data)",
"_____no_output_____"
],
[
"#Check the size of the data\ncom_data=pd.read_json('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-com_buildings.json')\nlen(com_data)",
"_____no_output_____"
],
[
"#Check the size of the data\nind_data=pd.read_json('/content/drive/MyDrive/ACEP_Data_Team/Railbelt_line/Script_data/alaska-ind_buildings.json')\nlen(ind_data)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e799b288f3c05913209de4110c77065e067fcb2b | 44,605 | ipynb | Jupyter Notebook | intro-to-tensorflow/intro_to_tensorflow.ipynb | postBG/deep-learning | 99f729d1fe2163e3591b7d9a82b784f21fd534e0 | [
"MIT"
] | null | null | null | intro-to-tensorflow/intro_to_tensorflow.ipynb | postBG/deep-learning | 99f729d1fe2163e3591b7d9a82b784f21fd534e0 | [
"MIT"
] | null | null | null | intro-to-tensorflow/intro_to_tensorflow.ipynb | postBG/deep-learning | 99f729d1fe2163e3591b7d9a82b784f21fd534e0 | [
"MIT"
] | null | null | null | 56.749364 | 14,786 | 0.68777 | [
[
[
"<h1 align=\"center\">TensorFlow Neural Network Lab</h1>",
"_____no_output_____"
],
[
"<img src=\"image/notmnist.png\">\nIn this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, <a href=\"http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html\">notMNIST</a>, consists of images of a letter from A to J in different fonts.\n\nThe above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!",
"_____no_output_____"
],
[
"To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print \"`All modules imported`\".",
"_____no_output_____"
]
],
[
[
"import hashlib\nimport os\nimport pickle\nfrom urllib.request import urlretrieve\n\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.utils import resample\nfrom tqdm import tqdm\nfrom zipfile import ZipFile\n\nprint('All modules imported.')",
"All modules imported.\n"
]
],
[
[
"The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).",
"_____no_output_____"
]
],
[
[
"## 이미 로컬로 파일을 다운로드 받았으므로 이제 이것은 돌리지 않아도 됨.\n\ndef download(url, file):\n \"\"\"\n Download file from <url>\n :param url: URL to file\n :param file: Local file path\n \"\"\"\n if not os.path.isfile(file):\n print('Downloading ' + file + '...')\n urlretrieve(url, file)\n print('Download Finished')\n\n# Download the training and test dataset.\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')\ndownload('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')",
"_____no_output_____"
],
[
"# Make sure the files aren't corrupted\nassert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\\\n 'notMNIST_train.zip file is corrupted. Remove the file and try again.'\nassert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\\\n 'notMNIST_test.zip file is corrupted. Remove the file and try again.'\n\n# Wait until you see that all files have been downloaded.\nprint('All files downloaded.')",
"All files downloaded.\n"
],
[
"def uncompress_features_labels(file):\n \"\"\"\n Uncompress features and labels from a zip file\n :param file: The zip file to extract the data from\n \"\"\"\n features = []\n labels = []\n\n with ZipFile(file) as zipf:\n # Progress Bar\n filenames_pbar = tqdm(zipf.namelist(), unit='files')\n \n # Get features and labels from all files\n for filename in filenames_pbar:\n # Check if the file is a directory\n if not filename.endswith('/'):\n with zipf.open(filename) as image_file:\n image = Image.open(image_file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32).flatten()\n\n # Get the the letter from the filename. This is the letter of the image.\n label = os.path.split(filename)[1][0]\n\n features.append(feature)\n labels.append(label)\n return np.array(features), np.array(labels)\n\n# Get the features and labels from the zip files\ntrain_features, train_labels = uncompress_features_labels('notMNIST_train.zip')\ntest_features, test_labels = uncompress_features_labels('notMNIST_test.zip')\n\n# Limit the amount of data to work with a docker container\ndocker_size_limit = 150000\ntrain_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)\n\n# Set flags for feature engineering. This will prevent you from skipping an important step.\nis_features_normal = False\nis_labels_encod = False\n\n# Wait until you see that all features and labels have been uncompressed.\nprint('All features and labels uncompressed.')",
"100%|██████████████████████████████████████████████████████████| 210001/210001 [04:33<00:00, 767.96files/s]\n100%|████████████████████████████████████████████████████████████| 10001/10001 [00:10<00:00, 937.99files/s]\n"
]
],
[
[
"<img src=\"image/Mean_Variance_Image.png\" style=\"height: 75%;width: 75%; position: relative; right: 5%\">\n## Problem 1\nThe first problem involves normalizing the features for your training and test data.\n\nImplement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.\n\nSince the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.\n\nMin-Max Scaling:\n$\nX'=a+{\\frac {\\left(X-X_{\\min }\\right)\\left(b-a\\right)}{X_{\\max }-X_{\\min }}}\n$\n\n*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorflow/intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# Problem 1 - Implement Min-Max scaling for grayscale image data\ndef normalize_grayscale(image_data):\n \"\"\"\n Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]\n :param image_data: The image data to be normalized\n :return: Normalized image data\n \"\"\"\n # TODO: Implement Min-Max scaling for grayscale image data\n a, b = 0.1, 0.9\n min_value, max_value = 0, 255\n \n return a + ((image_data - min_value) * (b - a)) / (max_value - min_value)\n\n### DON'T MODIFY ANYTHING BELOW ###\n# Test Cases\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),\n [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,\n 0.125098039216, 0.128235294118, 0.13137254902, 0.9],\n decimal=3)\nnp.testing.assert_array_almost_equal(\n normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),\n [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,\n 0.896862745098, 0.9])\n\nif not is_features_normal:\n train_features = normalize_grayscale(train_features)\n test_features = normalize_grayscale(test_features)\n is_features_normal = True\n\nprint('Tests Passed!')",
"Tests Passed!\n"
],
[
"if not is_labels_encod:\n # Turn labels into numbers and apply One-Hot Encoding\n encoder = LabelBinarizer()\n encoder.fit(train_labels)\n train_labels = encoder.transform(train_labels)\n test_labels = encoder.transform(test_labels)\n\n # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32\n train_labels = train_labels.astype(np.float32)\n test_labels = test_labels.astype(np.float32)\n is_labels_encod = True\n\nprint('Labels One-Hot Encoded')",
"Labels One-Hot Encoded\n"
],
[
"assert is_features_normal, 'You skipped the step to normalize the features'\nassert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'\n\n# Get randomized datasets for training and validation\ntrain_features, valid_features, train_labels, valid_labels = train_test_split(\n train_features,\n train_labels,\n test_size=0.05,\n random_state=832289)\n\nprint('Training features and labels randomized and split.')",
"Training features and labels randomized and split.\n"
],
[
"# Save the data for easy access\npickle_file = 'notMNIST.pickle'\nif not os.path.isfile(pickle_file):\n print('Saving data to pickle file...')\n try:\n with open('notMNIST.pickle', 'wb') as pfile:\n pickle.dump(\n {\n 'train_dataset': train_features,\n 'train_labels': train_labels,\n 'valid_dataset': valid_features,\n 'valid_labels': valid_labels,\n 'test_dataset': test_features,\n 'test_labels': test_labels,\n },\n pfile, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', pickle_file, ':', e)\n raise\n\nprint('Data cached in pickle file.')",
"Saving data to pickle file...\nData cached in pickle file.\n"
]
],
[
[
"# Checkpoint\nAll your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\n# Load the modules\nimport pickle\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n# Reload the data\npickle_file = 'notMNIST.pickle'\nwith open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n train_features = pickle_data['train_dataset']\n train_labels = pickle_data['train_labels']\n valid_features = pickle_data['valid_dataset']\n valid_labels = pickle_data['valid_labels']\n test_features = pickle_data['test_dataset']\n test_labels = pickle_data['test_labels']\n del pickle_data # Free up memory\n\nprint('Data and modules loaded.')",
"Data and modules loaded.\n"
]
],
[
[
"\n## Problem 2\n\nNow it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.\n\n<img src=\"image/network_diagram.png\" style=\"height: 40%;width: 40%; position: relative; right: 10%\">\n\nFor the input here the images have been flattened into a vector of $28 \\times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network. \n\nFor the neural network to train on your data, you need the following <a href=\"https://www.tensorflow.org/resources/dims_types.html#data-types\">float32</a> tensors:\n - `features`\n - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`)\n - `labels`\n - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`)\n - `weights`\n - Variable Tensor with random numbers from a truncated normal distribution.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal\">`tf.truncated_normal()` documentation</a> for help.\n - `biases`\n - Variable Tensor with all zeros.\n - See <a href=\"https://www.tensorflow.org/api_docs/python/constant_op.html#zeros\"> `tf.zeros()` documentation</a> for help.\n\n*If you're having trouble solving problem 2, review \"TensorFlow Linear Function\" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# All the pixels in the image (28 * 28 = 784)\nfeatures_count = 784\n# All the labels\nlabels_count = 10\n\n# TODO: Set the features and labels tensors\nfeatures = tf.placeholder(tf.float32)\nlabels = tf.placeholder(tf.float32)\n\n# TODO: Set the weights and biases tensors\nweights = tf.Variable(tf.truncated_normal((features_count, labels_count)))\nbiases = tf.Variable(tf.zeros(labels_count))\n\n\n### DON'T MODIFY ANYTHING BELOW ###\n\n#Test Cases\nfrom tensorflow.python.ops.variables import Variable\n\nassert features._op.name.startswith('Placeholder'), 'features must be a placeholder'\nassert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'\nassert isinstance(weights, Variable), 'weights must be a TensorFlow variable'\nassert isinstance(biases, Variable), 'biases must be a TensorFlow variable'\n\nassert features._shape == None or (\\\n features._shape.dims[0].value is None and\\\n features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'\nassert labels._shape == None or (\\\n labels._shape.dims[0].value is None and\\\n labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'\nassert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'\nassert biases._variable._shape == (10), 'The shape of biases is incorrect'\n\nassert features._dtype == tf.float32, 'features must be type float32'\nassert labels._dtype == tf.float32, 'labels must be type float32'\n\n# Feed dicts for training, validation, and test session\ntrain_feed_dict = {features: train_features, labels: train_labels}\nvalid_feed_dict = {features: valid_features, labels: valid_labels}\ntest_feed_dict = {features: test_features, labels: test_labels}\n\n# Linear Function WX + b\nlogits = tf.matmul(features, weights) + biases\n\nprediction = tf.nn.softmax(logits)\n\n# Cross entropy\ncross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)\n\n# Training loss\nloss = 
tf.reduce_mean(cross_entropy)\n\n# Create an operation that initializes all variables\ninit = tf.global_variables_initializer()\n\n# Test Cases\nwith tf.Session() as session:\n session.run(init)\n session.run(loss, feed_dict=train_feed_dict)\n session.run(loss, feed_dict=valid_feed_dict)\n session.run(loss, feed_dict=test_feed_dict)\n biases_data = session.run(biases)\n\nassert not np.count_nonzero(biases_data), 'biases must be zeros'\n\nprint('Tests Passed!')",
"Tests Passed!\n"
],
[
"# Determine if the predictions are correct\nis_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))\n# Calculate the accuracy of the predictions\naccuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))\n\nprint('Accuracy function created.')",
"Accuracy function created.\n"
]
],
[
[
"<img src=\"image/Learn_Rate_Tune_Image.png\" style=\"height: 70%;width: 70%\">\n## Problem 3\nBelow are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best acccuracy.\n\nParameter configurations:\n\nConfiguration 1\n* **Epochs:** 1\n* **Learning Rate:**\n * 0.8\n * 0.5\n * 0.1\n * 0.05\n * 0.01\n\nConfiguration 2\n* **Epochs:**\n * 1\n * 2\n * 3\n * 4\n * 5\n* **Learning Rate:** 0.2\n\nThe code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.\n\n*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*",
"_____no_output_____"
]
],
[
[
"# Change if you have memory restrictions\nbatch_size = 128\n\n# TODO: Find the best parameters for each configuration\nepochs = 10\nlearning_rate = 0.08\n\n\n\n### DON'T MODIFY ANYTHING BELOW ###\n# Gradient Descent\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) \n\n# The accuracy measured against the validation set\nvalidation_accuracy = 0.0\n\n# Measurements use for graphing loss and accuracy\nlog_batch_step = 50\nbatches = []\nloss_batch = []\ntrain_acc_batch = []\nvalid_acc_batch = []\n\nwith tf.Session() as session:\n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i*batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer and get loss\n _, l = session.run(\n [optimizer, loss],\n feed_dict={features: batch_features, labels: batch_labels})\n\n # Log every 50 batches\n if not batch_i % log_batch_step:\n # Calculate Training and Validation accuracy\n training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\n # Log batches\n previous_batch = batches[-1] if batches else 0\n batches.append(log_batch_step + previous_batch)\n loss_batch.append(l)\n train_acc_batch.append(training_accuracy)\n valid_acc_batch.append(validation_accuracy)\n\n # Check accuracy against Validation data\n validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)\n\nloss_plot = plt.subplot(211)\nloss_plot.set_title('Loss')\nloss_plot.plot(batches, loss_batch, 'g')\nloss_plot.set_xlim([batches[0], batches[-1]])\nacc_plot = 
plt.subplot(212)\nacc_plot.set_title('Accuracy')\nacc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')\nacc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')\nacc_plot.set_ylim([0, 1.0])\nacc_plot.set_xlim([batches[0], batches[-1]])\nacc_plot.legend(loc=4)\nplt.tight_layout()\nplt.show()\n\nprint('Validation accuracy at {}'.format(validation_accuracy))",
"Epoch 1/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:15<00:00, 14.83batches/s]\nEpoch 2/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:00<00:00, 18.32batches/s]\nEpoch 3/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:04<00:00, 17.37batches/s]\nEpoch 4/10: 100%|████████████████████████████████████████████████| 1114/1114 [00:58<00:00, 19.09batches/s]\nEpoch 5/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:02<00:00, 17.69batches/s]\nEpoch 6/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:10<00:00, 15.76batches/s]\nEpoch 7/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:20<00:00, 13.76batches/s]\nEpoch 8/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:00<00:00, 18.38batches/s]\nEpoch 9/10: 100%|████████████████████████████████████████████████| 1114/1114 [00:58<00:00, 18.90batches/s]\nEpoch 10/10: 100%|████████████████████████████████████████████████| 1114/1114 [01:06<00:00, 16.67batches/s]\n"
]
],
[
[
"## Best Hyper-parameters\n\n1. **epochs**: 1, **Learning Rate**: 0.1 -> **Validation accuracy**: 0.734\n2. **epochs**: 5, **Learning Rate**: 0.2 -> **Validation accuracy**: 0.760\n3. **epochs**: 5, **Learning Rate**: 0.1 -> **Validation accuracy**: 0.766",
"_____no_output_____"
],
[
"## Test\nYou're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.",
"_____no_output_____"
]
],
[
[
"### DON'T MODIFY ANYTHING BELOW ###\n# The accuracy measured against the test set\ntest_accuracy = 0.0\n\nwith tf.Session() as session:\n \n session.run(init)\n batch_count = int(math.ceil(len(train_features)/batch_size))\n\n for epoch_i in range(epochs):\n \n # Progress bar\n batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')\n \n # The training cycle\n for batch_i in batches_pbar:\n # Get a batch of training features and labels\n batch_start = batch_i*batch_size\n batch_features = train_features[batch_start:batch_start + batch_size]\n batch_labels = train_labels[batch_start:batch_start + batch_size]\n\n # Run optimizer\n _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})\n\n # Check accuracy against Test data\n test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)\n\n\nassert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)\nprint('Nice Job! Test Accuracy is {}'.format(test_accuracy))",
"Epoch 1/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 149.98batches/s]\nEpoch 2/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 158.93batches/s]\nEpoch 3/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 157.26batches/s]\nEpoch 4/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:08<00:00, 137.08batches/s]\nEpoch 5/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:10<00:00, 110.91batches/s]\nEpoch 6/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:08<00:00, 126.60batches/s]\nEpoch 7/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 143.13batches/s]\nEpoch 8/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 151.78batches/s]\nEpoch 9/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 141.90batches/s]\nEpoch 10/10: 100%|███████████████████████████████████████████████| 1114/1114 [00:07<00:00, 158.18batches/s]\n"
]
],
[
[
"# Multiple layers\nGood job! You built a one layer TensorFlow network! However, you might want to build more than one layer. This is deep learning after all! In the next section, you will start to satisfy your need for more layers.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e799b5ddb0cbea16ef48975767a9d5fa71c3e390 | 4,346 | ipynb | Jupyter Notebook | examples/travelling salesman problem.ipynb | lmbringas/NeuralMap | 0a8787e79f3985bb188b1b041e3ec7e558c4a742 | [
"MIT"
] | 4 | 2020-10-02T11:46:48.000Z | 2021-05-14T18:20:41.000Z | examples/travelling salesman problem.ipynb | lmbringas/NeuralMap | 0a8787e79f3985bb188b1b041e3ec7e558c4a742 | [
"MIT"
] | 1 | 2021-06-21T18:46:39.000Z | 2021-06-21T18:46:39.000Z | examples/travelling salesman problem.ipynb | lmbringas/NeuralMap | 0a8787e79f3985bb188b1b041e3ec7e558c4a742 | [
"MIT"
] | 1 | 2021-06-13T19:58:19.000Z | 2021-06-13T19:58:19.000Z | 30.180556 | 154 | 0.536815 | [
[
[
"from neural_map import NeuralMap\n\n# https://github.com/DiegoVicen/ntnu-som/blob/master/src/helper.py\n# http://www.math.uwaterloo.ca/tsp/world/countries.html",
"_____no_output_____"
],
[
"import pandas as pd\n\n# towns = pd.read_csv('datasets/ar9152.tsp', delimiter=' ').values[:, [2, 1]]\n# optimal_route_distance = 837377\n\ntowns = pd.read_csv('http://www.math.uwaterloo.ca/tsp/world/uy734.tsp', delimiter=' ', skiprows=lambda x: (x <= 6 or x >= 741)).values[:, [2, 1]]\noptimal_route_distance = 79114",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n\ndef euclidean_distance_2d(X, Y):\n return ((X[0] - Y[0]) ** 2 + (X[1] - Y[1]) ** 2) ** (1/2)\n\ndef tsp(nm_instance, points, optimal_route_distance):\n # find nearest neuron for each point\n city_neurons = {}\n for city_idx, city in enumerate(points):\n idx = nm_instance.get_best_matching_unit(city)[1]\n if idx not in city_neurons:\n city_neurons[idx] = [city]\n else:\n print(\"hola\")\n city_neurons[idx].append(city)\n\n # order cities according to neuron order\n tsp_order = []\n for neuron_idx in range(nm_instance.rows):\n if neuron_idx in city_neurons:\n tsp_order += city_neurons[neuron_idx]\n\n # calculate tsp distance for tsp_order\n tsp_distance = euclidean_distance_2d(tsp_order[0], tsp_order[-1])\n for idx in range(len(tsp_order)-1):\n tsp_distance += euclidean_distance_2d(tsp_order[idx], tsp_order[idx + 1])\n \n # print total distance, optimal distance, and their relation\n response = \"Travelling Salesman Problem\"\n response += \"\\n total distance: \" + str(int(tsp_distance))\n response += \"\\n optimal route ristance: \" + str(int(optimal_route_distance))\n response += \"\\n total distance as percentage of optimal distance: \" + str(int(100 * tsp_distance / optimal_route_distance)) + \"%\"\n print(response)\n \n # visualize route\n n_towns = points.shape[0]\n nodes = nm_instance.weights.reshape(-1, 2)\n plt.figure(figsize=(12,10))\n plt.scatter(points[:, 0], points[:, 1])\n for i in range(n_towns * factor):\n first = nodes[i % (n_towns * factor)]\n second = nodes[(i + 1) % (n_towns * factor)]\n plt.plot((first[0], second[0]), (first[1], second[1]))\n plt.show()",
"_____no_output_____"
],
[
"factor = 6\nn_towns = towns.shape[0]\n\nnm = NeuralMap(variables=2, metric='euclidean', columns=1, rows=n_towns * factor, hexagonal=False, toroidal=True)\nnm.train(data=towns)",
"_____no_output_____"
],
[
"tsp(nm, towns, optimal_route_distance)",
"_____no_output_____"
],
[
"# import json\n\n# with open(\"datasets/argentina_som.json\", 'w') as outfile:\n# json.dump(nm_dict, outfile)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e799bd73f9218d726713d71c81f123fc2c5dd1ec | 4,243 | ipynb | Jupyter Notebook | test/test-grade/notebooks/fails6H.ipynb | chrispyles/otter-grader | bca8061450412c9d8e4a53f1641711fb522b6b33 | [
"BSD-3-Clause"
] | 76 | 2020-01-24T07:18:34.000Z | 2022-03-16T01:16:28.000Z | test/test-grade/notebooks/fails6H.ipynb | chrispyles/otter-grader | bca8061450412c9d8e4a53f1641711fb522b6b33 | [
"BSD-3-Clause"
] | 413 | 2019-10-07T03:49:51.000Z | 2022-03-29T18:23:05.000Z | test/test-grade/notebooks/fails6H.ipynb | chrispyles/otter-grader | bca8061450412c9d8e4a53f1641711fb522b6b33 | [
"BSD-3-Clause"
] | 41 | 2020-01-24T21:45:43.000Z | 2022-03-14T16:11:55.000Z | 18.132479 | 102 | 0.477964 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\nimport otter\ngrader = otter.Notebook(\"../tests\")",
"_____no_output_____"
]
],
[
[
"**Question 1:** Write a function `square` that squares its argument.",
"_____no_output_____"
]
],
[
[
"def square(x):\n return x**2",
"_____no_output_____"
],
[
"grader.check(\"q1\")",
"_____no_output_____"
]
],
[
[
"**Question 2:** Write a function `negate` that negates its argument.",
"_____no_output_____"
]
],
[
[
"def negate(x):\n return not x",
"_____no_output_____"
],
[
"grader.check(\"q2\")",
"_____no_output_____"
]
],
[
[
"**Question 3:** Assign `x` to the negation of `[]`. Use `negate`.",
"_____no_output_____"
]
],
[
[
"x = negate([])\nx",
"_____no_output_____"
],
[
"grader.check(\"q3\")",
"_____no_output_____"
]
],
[
[
"**Question 4:** Assign `x` to the square of 6.25. Use `square`.",
"_____no_output_____"
]
],
[
[
"x = square(6.25)\nx",
"_____no_output_____"
],
[
"grader.check(\"q4\")",
"_____no_output_____"
]
],
[
[
"**Question 5:** Plot $f(x) = \\cos (x e^x)$ on $(0,10)$.",
"_____no_output_____"
]
],
[
[
"x = np.linspace(0, 10, 100)\ny = np.cos(x * np.exp(x))\nplt.plot(x, y)",
"_____no_output_____"
]
],
[
[
"**Question 6:** Write a non-recursive infinite generator for the Fibonacci sequence `fiberator`.",
"_____no_output_____"
]
],
[
[
"def fiberator():\n yield 0\n yield 1\n a, b = 0, 1\n while True:\n a, b = b, a + b\n yield a",
"_____no_output_____"
],
[
"grader.check(\"q6\")",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e799c4f81a68878c396a5c3fce1f01640e12d9ef | 17,754 | ipynb | Jupyter Notebook | docs/source/example_notebooks/do_sampler_demo.ipynb | Causal-Inference-ZeroToAll/dowhy | ac03e857de0cf31283323546ca16dd1cebc34eba | [
"MIT"
] | 12 | 2020-08-05T09:48:27.000Z | 2022-02-07T15:52:24.000Z | docs/source/example_notebooks/do_sampler_demo.ipynb | Causal-Inference-ZeroToAll/dowhy_zh | ac03e857de0cf31283323546ca16dd1cebc34eba | [
"MIT"
] | 1 | 2020-03-18T11:47:43.000Z | 2020-03-18T11:47:43.000Z | docs/source/example_notebooks/do_sampler_demo.ipynb | Causal-Inference-ZeroToAll/dowhy | ac03e857de0cf31283323546ca16dd1cebc34eba | [
"MIT"
] | 1 | 2020-08-05T18:08:55.000Z | 2020-08-05T18:08:55.000Z | 47.218085 | 2,220 | 0.713248 | [
[
[
"**FAQ:**\n\n- weighting do sampler `dowhy.do_samplers.weighting_sampler.WeightingSampler` 是什么?应该是一个使用倾向得分估计(Logistic Regression) 的判别模型。",
"_____no_output_____"
],
[
"# Do-sampler 简介\n\n--- by Adam Kelleher, Heyang Gong 编译\n\nThe \"do-sampler\" is a new feature in DoWhy. 尽管大多数以潜在结果为导向的估算器都专注于估计 the specific contrast $E[Y_0 - Y_1]$, Pearlian inference 专注于更基本的因果量,如反事实结果的分布$P(Y^x = y)$, 它可以用来得出其他感兴趣的统计信息。",
"_____no_output_____"
],
[
"通常,很难非参数地表示概率分布。即使可以,您也不想 gloss over finite-sample problems with you data you used to generate it. 考虑到这些问题,我们决定通过使用称为“ do-sampler”的对象从它们中进行采样来表示干预性分布。利用这些样本,我们可以希望 compute finite-sample statistics of our interventional data. 如果我们 bootstrap 许多这样的样本,我们甚至可以期待得到这些统计量的 good sampling distributions. \n\nThe user should not 这仍然是一个活跃的研究领域,so you should be careful about being too confident in bootstrapped error bars from do-samplers.\n\nNote that do samplers sample from the outcome distribution, and so will vary significantly from sample to sample. To use them to compute outcomes, 我们推荐 generate several such samples to get an idea of the posterior variance of your statistic of interest.",
"_____no_output_____"
],
[
"## Pearlian 干预\n\nFollowing the notion of an intervention in a Pearlian causal model, 我们的 do-samplers 顺序执行如下步骤:\n\n1. Disrupt causes\n2. Make Effective\n3. Propagate and sample",
"_____no_output_____"
],
[
"在第一阶段,我们设想 cutting the in-edges to all of the variables we're intervening on. 在第二阶段,我们将这些变量的值设置为 their interventional quantities。在第三阶段,我们通过模型向前传播该值 to compute interventional outcomes with a sampling procedure.\n\n在实践中,我们可以通过多种方式来实现这些步骤。 They're most explicit when we build the model as a linear bayesian network in PyMC3, which is what underlies the MCMC do sampler. In that case, we fit one bayesian network to the data, then construct a new network representing the interventional network. The structural equations are set with the parameters fit in the initial network, and we sample from that new network to get our do sample.\n\nIn the **weighting do sampler**, we abstractly think of \"disrupting the causes\" by accounting for selection into the causal state through propensity score estimation. These scores contain the information used to block back-door paths, and so have the same statistics effect as cutting edges into the causal state. We make the treatment effective by selecting the subset of our data set with the correct value of the causal state. Finally, we generated a weighted random sample using inverse propensity weighting to get our do sample.\n\n\n您可以通过其他方法来实现这三个步骤, but the formula is the same. We've abstracted them out as abstract class methods which you should override if you'd like to create your own do sampler!\n\n我们实现的 do sampler 有三个特点: Statefulness, Integration 和 Specifying interventions.",
"_____no_output_____"
],
[
"### Statefulness\n\nThe do sampler when accessed through the high-level pandas API is stateless by default. This makes it intuitive to work with, and you can generate different samples with repeated calls to the `pandas.DataFrame.causal.do`. It can be made stateful, which is sometimes useful. \n\n我们之前提到的三阶段流程已 is implemented by passing an internal `pandas.DataFrame` through each of the three stages, but regarding it as temporary. The internal dataframe is reset by default before returning the result.\n\nIt can be much more efficient to maintain state in the do sampler between generating samples. This is especially true when step 1 requires fitting an expensive model, as is the case with the MCMC do sampler, the kernel density sampler, and the weighting sampler. \n\n(只拟合一次模型) Instead of re-fitting the model for each sample, you'd like to fit it once, and then generate many samples from the do sampler. You can do this by setting the kwarg `stateful=True` when you call the `pandas.DataFrame.causal.do` method. To reset the state of the dataframe (deleting the model as well as the internal dataframe), you can call the `pandas.DataFrame.causal.reset` method.\n\nThrough the lower-level API, the sampler 默认是无需申明的。 The assumption is that a \"power user\" who is using the low-level API will want more control over the sampling process. In this case, state is carried by internal dataframe `self._df`, which is a copy of the dataframe passed on instantiation. The original dataframe is kept in `self._data`, and is used when the user resets state. ",
"_____no_output_____"
],
[
"### Integration\n\nThe do-sampler is built on top of the identification abstraction used throughout DoWhy. It uses a `dowhy.CausalModel` to perform identification, and builds any models it needs automatically using this identification.",
"_____no_output_____"
],
[
"### Specifying Interventions\n\nThere is a kwarg on the `dowhy.do_sampler.DoSampler` object called `keep_original_treatment`. While an intervention might be to set all units treatment values to some specific value, it's often natural to keep them set as they were, and instead remove confounding bias during effect estimation. If you'd prefer not to specify an intervention, you can set the kwarg like `keep_original_treatment=True`, and the second stage of the 3-stage process will be skipped. In that case, any intervention specified on sampling will be ignored.\n\nIf the `keep_original_treatment` flag is set to false (it is by default), then you must specify an intervention when you sample from the do sampler. For details, see the demo below!",
"_____no_output_____"
],
[
"## Demo\n\n首先,让我们生成一些数据和一个因果模型。Here, Z confounds our causal state, D, with the outcome, Y.",
"_____no_output_____"
]
],
[
[
"import os, sys\nsys.path.append(os.path.abspath(\"../../../\"))\n\nimport numpy as np\nimport pandas as pd\nimport dowhy.api",
"_____no_output_____"
],
[
"N = 5000\nz = np.random.uniform(size=N)\nd = np.random.binomial(1., p=1./(1. + np.exp(-5. * z)))\ny = 2. * z + d + 0.1 * np.random.normal(size=N)\ndf = pd.DataFrame({'Z': z, 'D': d, 'Y': y})\n(df[df.D == 1].mean() - df[df.D == 0].mean())['Y']",
"_____no_output_____"
]
],
[
[
"结果比真实的因果效应高 60%. 那么,让我们为这些数据建立因果模型。",
"_____no_output_____"
]
],
[
[
"from dowhy import CausalModel\n\ncauses = ['D']\noutcomes = ['Y']\ncommon_causes = ['Z']\n\nmodel = CausalModel(df, \n causes,\n outcomes,\n common_causes=common_causes,\n proceed_when_unidentifiable=True)",
"WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.\nINFO:dowhy.causal_graph:If this is observed data (not from a randomized experiment), there might always be missing confounders. Adding a node named \"Unobserved Confounders\" to reflect this.\nINFO:dowhy.causal_model:Model to find the causal effect of treatment ['D'] on outcome ['Y']\n"
]
],
[
[
"Now that we have a model, we can try to identify the causal effect.",
"_____no_output_____"
]
],
[
[
"identification = model.identify_effect()",
"INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'Z']\nWARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.\nINFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.\nINFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]\n"
]
],
[
[
"Identification works! We didn't actually need to do this yet, since it will happen internally with the do sampler, but it can't hurt to check that identification works before proceeding. Now, let's build the sampler.",
"_____no_output_____"
]
],
[
[
"from dowhy.do_samplers.weighting_sampler import WeightingSampler\n\nsampler = WeightingSampler(df,\n causal_model=model,\n keep_original_treatment=True,\n variable_types={'D': 'b', 'Z': 'c', 'Y': 'c'})",
"INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['U', 'Z']\nWARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.\nINFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.\nINFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]\nINFO:dowhy.do_sampler:Using WeightingSampler for do sampling.\nINFO:dowhy.do_sampler:Caution: do samplers assume iid data.\n"
]
],
[
[
"Now, we can just sample from the interventional distribution! Since we set the `keep_original_treatment` flag to `False`, any treatment we pass here will be ignored. Here, we'll just pass `None` to acknowledge that we know we don't want to pass anything.\n\nIf you'd prefer to specify an intervention, you can just put the interventional value here instead as a list or numpy array.\n",
"_____no_output_____"
]
],
[
[
"interventional_df = sampler.do_sample(None)",
"_____no_output_____"
],
[
"(interventional_df[interventional_df.D == 1].mean() - interventional_df[interventional_df.D == 0].mean())['Y']",
"_____no_output_____"
]
],
[
[
"现在我们的结果更接近真实效应 1.0!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e799c8e0400631a25a4cd3d171a63f482310e277 | 12,617 | ipynb | Jupyter Notebook | docs/_downloads/c4bf1a4ba1714ace73ad54fe5c6d9d00/pytorch_tutorial.ipynb | leejh1230/PyTorch-tutorials-kr | ebbf44b863ff96c597631e28fc194eafa590c9eb | [
"BSD-3-Clause"
] | 1 | 2019-12-05T05:16:44.000Z | 2019-12-05T05:16:44.000Z | docs/_downloads/c4bf1a4ba1714ace73ad54fe5c6d9d00/pytorch_tutorial.ipynb | leejh1230/PyTorch-tutorials-kr | ebbf44b863ff96c597631e28fc194eafa590c9eb | [
"BSD-3-Clause"
] | null | null | null | docs/_downloads/c4bf1a4ba1714ace73ad54fe5c6d9d00/pytorch_tutorial.ipynb | leejh1230/PyTorch-tutorials-kr | ebbf44b863ff96c597631e28fc194eafa590c9eb | [
"BSD-3-Clause"
] | null | null | null | 47.432331 | 1,201 | 0.605849 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\nIntroduction to PyTorch\n***********************\n\nIntroduction to Torch's tensor library\n======================================\n\nAll of deep learning is computations on tensors, which are\ngeneralizations of a matrix that can be indexed in more than 2\ndimensions. We will see exactly what this means in-depth later. First,\nlets look what we can do with tensors.\n",
"_____no_output_____"
]
],
[
[
"# Author: Robert Guthrie\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ntorch.manual_seed(1)",
"_____no_output_____"
]
],
[
[
"Creating Tensors\n~~~~~~~~~~~~~~~~\n\nTensors can be created from Python lists with the torch.Tensor()\nfunction.\n\n\n",
"_____no_output_____"
]
],
[
[
"# torch.tensor(data) creates a torch.Tensor object with the given data.\nV_data = [1., 2., 3.]\nV = torch.tensor(V_data)\nprint(V)\n\n# Creates a matrix\nM_data = [[1., 2., 3.], [4., 5., 6]]\nM = torch.tensor(M_data)\nprint(M)\n\n# Create a 3D tensor of size 2x2x2.\nT_data = [[[1., 2.], [3., 4.]],\n [[5., 6.], [7., 8.]]]\nT = torch.tensor(T_data)\nprint(T)",
"_____no_output_____"
]
],
[
[
"What is a 3D tensor anyway? Think about it like this. If you have a\nvector, indexing into the vector gives you a scalar. If you have a\nmatrix, indexing into the matrix gives you a vector. If you have a 3D\ntensor, then indexing into the tensor gives you a matrix!\n\nA note on terminology:\nwhen I say \"tensor\" in this tutorial, it refers\nto any torch.Tensor object. Matrices and vectors are special cases of\ntorch.Tensors, where their dimension is 1 and 2 respectively. When I am\ntalking about 3D tensors, I will explicitly use the term \"3D tensor\".\n\n\n",
"_____no_output_____"
]
],
[
[
"# Index into V and get a scalar (0 dimensional tensor)\nprint(V[0])\n# Get a Python number from it\nprint(V[0].item())\n\n# Index into M and get a vector\nprint(M[0])\n\n# Index into T and get a matrix\nprint(T[0])",
"_____no_output_____"
]
],
[
[
"You can also create tensors of other datatypes. The default, as you can\nsee, is Float. To create a tensor of integer types, try\ntorch.LongTensor(). Check the documentation for more data types, but\nFloat and Long will be the most common.\n\n\n",
"_____no_output_____"
],
[
"You can create a tensor with random data and the supplied dimensionality\nwith torch.randn()\n\n\n",
"_____no_output_____"
]
],
[
[
"x = torch.randn((3, 4, 5))\nprint(x)",
"_____no_output_____"
]
],
[
[
"Operations with Tensors\n~~~~~~~~~~~~~~~~~~~~~~~\n\nYou can operate on tensors in the ways you would expect.\n\n",
"_____no_output_____"
]
],
[
[
"x = torch.tensor([1., 2., 3.])\ny = torch.tensor([4., 5., 6.])\nz = x + y\nprint(z)",
"_____no_output_____"
]
],
[
[
"See `the documentation <https://pytorch.org/docs/torch.html>`__ for a\ncomplete list of the massive number of operations available to you. They\nexpand beyond just mathematical operations.\n\nOne helpful operation that we will make use of later is concatenation.\n\n\n",
"_____no_output_____"
]
],
[
[
"# By default, it concatenates along the first axis (concatenates rows)\nx_1 = torch.randn(2, 5)\ny_1 = torch.randn(3, 5)\nz_1 = torch.cat([x_1, y_1])\nprint(z_1)\n\n# Concatenate columns:\nx_2 = torch.randn(2, 3)\ny_2 = torch.randn(2, 5)\n# second arg specifies which axis to concat along\nz_2 = torch.cat([x_2, y_2], 1)\nprint(z_2)\n\n# If your tensors are not compatible, torch will complain. Uncomment to see the error\n# torch.cat([x_1, x_2])",
"_____no_output_____"
]
],
[
[
"Reshaping Tensors\n~~~~~~~~~~~~~~~~~\n\nUse the .view() method to reshape a tensor. This method receives heavy\nuse, because many neural network components expect their inputs to have\na certain shape. Often you will need to reshape before passing your data\nto the component.\n\n\n",
"_____no_output_____"
]
],
[
[
"x = torch.randn(2, 3, 4)\nprint(x)\nprint(x.view(2, 12)) # Reshape to 2 rows, 12 columns\n# Same as above. If one of the dimensions is -1, its size can be inferred\nprint(x.view(2, -1))",
"_____no_output_____"
]
],
[
[
"Computation Graphs and Automatic Differentiation\n================================================\n\nThe concept of a computation graph is essential to efficient deep\nlearning programming, because it allows you to not have to write the\nback propagation gradients yourself. A computation graph is simply a\nspecification of how your data is combined to give you the output. Since\nthe graph totally specifies what parameters were involved with which\noperations, it contains enough information to compute derivatives. This\nprobably sounds vague, so let's see what is going on using the\nfundamental flag ``requires_grad``.\n\nFirst, think from a programmers perspective. What is stored in the\ntorch.Tensor objects we were creating above? Obviously the data and the\nshape, and maybe a few other things. But when we added two tensors\ntogether, we got an output tensor. All this output tensor knows is its\ndata and shape. It has no idea that it was the sum of two other tensors\n(it could have been read in from a file, it could be the result of some\nother operation, etc.)\n\nIf ``requires_grad=True``, the Tensor object keeps track of how it was\ncreated. Lets see it in action.\n\n\n",
"_____no_output_____"
]
],
[
[
"# Tensor factory methods have a ``requires_grad`` flag\nx = torch.tensor([1., 2., 3], requires_grad=True)\n\n# With requires_grad=True, you can still do all the operations you previously\n# could\ny = torch.tensor([4., 5., 6], requires_grad=True)\nz = x + y\nprint(z)\n\n# BUT z knows something extra.\nprint(z.grad_fn)",
"_____no_output_____"
]
],
[
[
"So Tensors know what created them. z knows that it wasn't read in from\na file, it wasn't the result of a multiplication or exponential or\nwhatever. And if you keep following z.grad_fn, you will find yourself at\nx and y.\n\nBut how does that help us compute a gradient?\n\n\n",
"_____no_output_____"
]
],
[
[
"# Lets sum up all the entries in z\ns = z.sum()\nprint(s)\nprint(s.grad_fn)",
"_____no_output_____"
]
],
[
[
"So now, what is the derivative of this sum with respect to the first\ncomponent of x? In math, we want\n\n\\begin{align}\\frac{\\partial s}{\\partial x_0}\\end{align}\n\n\n\nWell, s knows that it was created as a sum of the tensor z. z knows\nthat it was the sum x + y. So\n\n\\begin{align}s = \\overbrace{x_0 + y_0}^\\text{$z_0$} + \\overbrace{x_1 + y_1}^\\text{$z_1$} + \\overbrace{x_2 + y_2}^\\text{$z_2$}\\end{align}\n\nAnd so s contains enough information to determine that the derivative\nwe want is 1!\n\nOf course this glosses over the challenge of how to actually compute\nthat derivative. The point here is that s is carrying along enough\ninformation that it is possible to compute it. In reality, the\ndevelopers of Pytorch program the sum() and + operations to know how to\ncompute their gradients, and run the back propagation algorithm. An\nin-depth discussion of that algorithm is beyond the scope of this\ntutorial.\n\n\n",
"_____no_output_____"
],
[
"Lets have Pytorch compute the gradient, and see that we were right:\n(note if you run this block multiple times, the gradient will increment.\nThat is because Pytorch *accumulates* the gradient into the .grad\nproperty, since for many models this is very convenient.)\n\n\n",
"_____no_output_____"
]
],
[
[
"# calling .backward() on any variable will run backprop, starting from it.\ns.backward()\nprint(x.grad)",
"_____no_output_____"
]
],
[
[
"Understanding what is going on in the block below is crucial for being a\nsuccessful programmer in deep learning.\n\n\n",
"_____no_output_____"
]
],
[
[
"x = torch.randn(2, 2)\ny = torch.randn(2, 2)\n# By default, user created Tensors have ``requires_grad=False``\nprint(x.requires_grad, y.requires_grad)\nz = x + y\n# So you can't backprop through z\nprint(z.grad_fn)\n\n# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``\n# flag in-place. The input flag defaults to ``True`` if not given.\nx = x.requires_grad_()\ny = y.requires_grad_()\n# z contains enough information to compute gradients, as we saw above\nz = x + y\nprint(z.grad_fn)\n# If any input to an operation has ``requires_grad=True``, so will the output\nprint(z.requires_grad)\n\n# Now z has the computation history that relates itself to x and y\n# Can we just take its values, and **detach** it from its history?\nnew_z = z.detach()\n\n# ... does new_z have information to backprop to x and y?\n# NO!\nprint(new_z.grad_fn)\n# And how could it? ``z.detach()`` returns a tensor that shares the same storage\n# as ``z``, but with the computation history forgotten. It doesn't know anything\n# about how it was computed.\n# In essence, we have broken the Tensor away from its past history",
"_____no_output_____"
]
],
[
[
"You can also stop autograd from tracking history on Tensors\nwith ``.requires_grad``=True by wrapping the code block in\n``with torch.no_grad():``\n\n",
"_____no_output_____"
]
],
[
[
"print(x.requires_grad)\nprint((x ** 2).requires_grad)\n\nwith torch.no_grad():\n\tprint((x ** 2).requires_grad)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e799f43c94b2a5bbe8270d6c4414e34f8106d13b | 22,564 | ipynb | Jupyter Notebook | day4.ipynb | mszzukowski/dw_matrix_car | bbc8771d8cc8049b4f894649a07575773d0dc485 | [
"MIT"
] | null | null | null | day4.ipynb | mszzukowski/dw_matrix_car | bbc8771d8cc8049b4f894649a07575773d0dc485 | [
"MIT"
] | null | null | null | day4.ipynb | mszzukowski/dw_matrix_car | bbc8771d8cc8049b4f894649a07575773d0dc485 | [
"MIT"
] | null | null | null | 22,564 | 22,564 | 0.561026 | [
[
[
"# !pip install --upgrade tables\n# !pip install eli5\n# !pip install xgboost",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.dummy import DummyRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\nimport xgboost as xgb\n\nfrom sklearn.metrics import mean_absolute_error as mae\nfrom sklearn.model_selection import cross_val_score\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance",
"_____no_output_____"
],
[
"cd '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car'",
"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car\n"
],
[
"df = pd.read_hdf('./data/car.h5')\n\ndf.shape",
"_____no_output_____"
]
],
[
[
"##Feature Engenering",
"_____no_output_____"
]
],
[
[
"SUFFIX_CAT = '__cat'\n\nfor feat in df.columns:\n if isinstance( df[feat][0], list): continue\n\n factorized_values = df[feat].factorize()[0]\n if SUFFIX_CAT in feat:\n df[feat] = factorized_values\n else:\n df[feat + SUFFIX_CAT] = df[feat].factorize()[0]",
"_____no_output_____"
],
[
"cat_feats = [x for x in df.columns if SUFFIX_CAT in x]\ncat_feats = [x for x in cat_feats if 'price' not in x]\nlen(cat_feats)",
"_____no_output_____"
],
[
"def run_model(model, feats):\n X = df[feats].values\n y = df['price_value'].values\n\n scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')\n return np.mean(scores), np.std(scores)",
"_____no_output_____"
]
],
[
[
"## DecisionTree",
"_____no_output_____"
]
],
[
[
"run_model(DecisionTreeRegressor(max_depth=5), cat_feats)",
"_____no_output_____"
]
],
[
[
"## Random Forest",
"_____no_output_____"
]
],
[
[
"model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)\nrun_model(model=model, feats=cat_feats)",
"_____no_output_____"
]
],
[
[
"##XGBoost",
"_____no_output_____"
]
],
[
[
"xgb_params = {\n 'max_depth': 5,\n 'n_estimators': 50,\n 'learning_rate': 0.1,\n 'seed': 0\n}\nrun_model(xgb.XGBRegressor(**xgb_params), cat_feats)",
"[12:48:38] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[12:48:55] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[12:49:12] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"m = xgb.XGBRegressor(**xgb_params)\nm.fit(X,y)\n\nimp = PermutationImportance(m, random_state=0).fit(X,y)\neli5.show_weights(imp, feature_names=cat_feats)",
"[13:02:30] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"feats = [\n 'param_napęd__cat',\n'param_rok-produkcji',\n'param_stan__cat',\n'param_skrzynia-biegów__cat',\n'param_faktura-vat__cat',\n'param_moc',\n'param_marka-pojazdu__cat',\n'feature_kamera-cofania__cat',\n'param_typ__cat',\n'param_pojemność-skokowa',\n'seller_name__cat',\n'feature_wspomaganie-kierownicy__cat',\n'param_model-pojazdu__cat',\n'param_wersja__cat',\n'param_kod-silnika__cat',\n'feature_system-start-stop__cat',\n'feature_asystent-pasa-ruchu__cat',\n'feature_czujniki-parkowania-przednie__cat',\n'feature_łopatki-zmiany-biegów__cat',\n'feature_regulowane-zawieszenie__cat',\n ]",
"_____no_output_____"
],
[
"run_model(xgb.XGBRegressor(**xgb_params), feats)",
"[13:24:53] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[13:24:56] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n[13:25:00] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x : -1 if str(x) == 'None' else int(x))",
"_____no_output_____"
],
[
"\ndf['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else x.split(' ')[0])",
"_____no_output_____"
],
[
"df['param_pojemność-skokowa'].unique()",
"_____no_output_____"
],
[
"df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else x.split('cm')[0].replace(' ',''))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e799f9533181ae8c2974228aed888678f3c8f7b6 | 12,566 | ipynb | Jupyter Notebook | notebook/perturbation/02-perturbation_differential.ipynb | siravan/SciMLTutorials.jl | 34f4044f96facf351eb32c991b73325a32d17a94 | [
"MIT"
] | null | null | null | notebook/perturbation/02-perturbation_differential.ipynb | siravan/SciMLTutorials.jl | 34f4044f96facf351eb32c991b73325a32d17a94 | [
"MIT"
] | null | null | null | notebook/perturbation/02-perturbation_differential.ipynb | siravan/SciMLTutorials.jl | 34f4044f96facf351eb32c991b73325a32d17a94 | [
"MIT"
] | null | null | null | 44.091228 | 1,814 | 0.583638 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e799f9be5bebbd7201dde3908ab0289814c24d0e | 1,970 | ipynb | Jupyter Notebook | homework/03 Search, list functions/donow/Moriarty_Sean_3_donow.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | homework/03 Search, list functions/donow/Moriarty_Sean_3_donow.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | homework/03 Search, list functions/donow/Moriarty_Sean_3_donow.ipynb | barjacks/algorithms_mine | bc248ed9ebb88aed73c6e8da3d3b9553d9173cdd | [
"MIT"
] | null | null | null | 17.747748 | 45 | 0.465482 | [
[
[
"def find_number(input_list, query):\n index=0\n index_list=[]\n for number in input_list:\n if number==query:\n index_list.append(index)\n index+=1\n if index_list:\n return index_list\n else:\n return 'Number not found'",
"_____no_output_____"
],
[
"l1=[1, 4, 5, 3, 6, 4, 7, 8, 3, 4, 5, 2]",
"_____no_output_____"
],
[
"find_number(l1, 4)",
"_____no_output_____"
],
[
"find_number(l1, 0)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e799fa04bf1c506249c64d7ddf905e2210eb7cd2 | 8,159 | ipynb | Jupyter Notebook | 2-Quantum_Algorithms/.ipynb_checkpoints/6-Shor_s_Algorithm-checkpoint.ipynb | klezm/Reference-Guide-For-Quantum-Computing-A-Microsoft-Garage-Project | 7ad48da242dfc097ce9048168283b0367e3b1eb3 | [
"MIT"
] | 331 | 2021-04-29T13:39:04.000Z | 2022-03-19T13:31:13.000Z | 2-Quantum_Algorithms/.ipynb_checkpoints/6-Shor_s_Algorithm-checkpoint.ipynb | klezm/Reference-Guide-For-Quantum-Computing-A-Microsoft-Garage-Project | 7ad48da242dfc097ce9048168283b0367e3b1eb3 | [
"MIT"
] | 7 | 2021-05-23T21:47:46.000Z | 2022-03-06T00:37:06.000Z | 2-Quantum_Algorithms/.ipynb_checkpoints/6-Shor_s_Algorithm-checkpoint.ipynb | klezm/Reference-Guide-For-Quantum-Computing-A-Microsoft-Garage-Project | 7ad48da242dfc097ce9048168283b0367e3b1eb3 | [
"MIT"
] | 48 | 2021-04-29T18:00:43.000Z | 2022-02-17T17:39:04.000Z | 63.742188 | 682 | 0.663317 | [
[
[
"Shor's algorithm was invented by Peter Shor in 1994 as a quantum computing algorithm to efficiently find prime factors of an integer. It is an important algorithm because the current widely used RSA cryptography scheme relies on the fact that large integers take unfeasible amount of time to factor on classical computers. With Shor's algorithm implemented when quantum computers are ready, existing cryptography will be broken.\n\nLet's say there are two large prime numbers $\\textit{p}$ and $\\textit{q}$. You are given the product of them 푝푝.푞푞, not the individual 푝푝 and 푞푞. If 푝푝.푞푞 is a small number, you can calculate possible values of 푝푝 and 푞푞. But if the product is large, there is no classical algorithm invented so far to give the components 푝푝 and 푞푞 in polynomial time. That is, if we try to factor a 2048 bit number which is a product of two large prime numbers, it will take more than the age of the universe even if you use the fastest supercomputer. However, using the Shor's algorithm, we can do it within in a few hours provided we have a quantum computer with a few thousand qubits.\n\nTo demonstrate this algorithm, let's take 퐶퐶= 35 as the number we want to factorize, and we want to know 푝푝 and 푞푞 that this 퐶퐶 is composed of.\n\nStep 1:\n\nPick a random number 푎푎 between 2 and 퐶퐶− 1 and take the greatest common denominator (GCD) with 퐶퐶. We need to remember that GCD of large numbers can be found in polynomial time using Euclidean algorithm. If the GCD does not equal to 1 (GCD != 1), then we can say that the GCD is one of the factor and we find the solution. For example, if we had chosen 푎푎 = 10 , then GCD (10, 35) = 5. Since it is not equal to 1, then 5 must be the factor. However, the probability of finding this random number to be the solution is extremely low. If we took 푎푎 = 3, then GCD (3,35) = 1, i.e. 푎푎 and 퐶퐶 are co-primes.\n\nStep 2:\n\nFind some number 푄푄 = 2 푞푞 such that 푄푄 is the first power of 2 greater than or equal to 퐶퐶^2. 
In this example 퐶퐶^2 = 352 = 1225; so 푄푄 = 2048 and 푞푞 = 11.\n\nStep 3:\n\nEvaluate the expression 푎푎푥푥푀푀 푓푓푑푑 퐶퐶 for all 푥푥 ranging from 0 to 푄푄− 1\n\nTABLE\n\nStep 4:\n\nAs you can see in Step 3, the period, 푟푟, is 12 in this case.\n\nStep 5:\n\nWe observe the following:\n\n푎푎^0 푀푀푓푓푑푑 퐶퐶 = 30 푀푀 푓푓푑푑 35 = 푎푎푔푔푀푀푓푓푑푑 퐶퐶 = 312 푀푀 푓푓푑푑 35 = 1,\n\nwhich means that 푎푎푔푔 is the first non-zero number when divided by 퐶퐶 gives the reminder 1. We can say that 푎푎푔푔- 1 will be divisible by 퐶퐶. This is written as 퐶퐶 | 푎푎푔푔- 1 (read as 퐶퐶 divides 푎푎푔푔- 1). If r is even, then we can rewrite it as follows:\n\n퐶퐶 | 푎푎푔푔- 12\n\n\n퐶퐶 | (푎푎푔푔/^2 )^2 - 12\n\n퐶퐶 | (푎푎푔푔/^2 - 1) (푎푎푔푔/^2 + 1) ( because 푎푎^2 − 푏푏^2 = (푎푎−푏푏)(푎푎+푏푏) ).\n\nIf 푟푟 turns out to be odd, then we need to repeat Step 1 by picking another 푎푎. Also, if 퐶퐶 | (푎푎푔푔/^2 + 1), we need to discard 푟푟 and repeat Step 1. It will be clear soon why we need to do these.\n\nIn addition, we clearly know that 퐶퐶 | (푎푎푔푔/^2 - 1) will never be the case because 푟푟 is the smallest number with the property 퐶퐶 | (푎푎푔푔- 1) by definition.\n\nNow that we have established that 퐶퐶 | (푎푎푔푔/^2 - 1) (푎푎푔푔/^2 + 1) but is not divisible with either of the terms separately, it can only mean that one of the factor, 푝푝, divides (푎푎푔푔/^2 - 1) and another factor, 푞푞, divides (푎푎푔푔/^2 + 1).\n\nSo, p will be a common divisor of 퐶퐶 and (푎푎푔푔/^2 - 1), and q will be a common divisor of 퐶퐶 and (푎푎푔푔/^2 +\n1). To get the values of 푝푝 and 푞푞, we simply need to find GCD( N , (푎푎푔푔/^2 - 1)) to get p and find GCD( N , (푎푎푔푔/^2 + 1)) to get 푞푞.\n\n\n\nIn our example, since 푟푟 = 12 and 퐶퐶 = 35 ;\nthen 푎푎푔푔/^2 - 1 = 312 /^2 - 1 = 728\nand 푎푎푔푔/^2 + 1 = 312 /^2 + 1 = 730.\nTherefore, 푝푝 = GCD(35, 728) = 7\nand 푞푞 = GCD(35, 730) = 5.\n\n\nIf you have read all the steps carefully, you will find that Step 3 is the elephant in the room. 
Since in real world scenarios, N will be so large that it is impossible to calculate 푎푎푥푥푀푀 푓푓푑푑 퐶퐶 for 푥푥 ranging from 0 to Q -1 without waiting for billions of years even with the most powerful supercomputers. This is where quantum computers will come to our rescue.\n\nWe have a quantum circuit that calculates 푎푎푥푥푀푀 푓푓푑푑 퐶퐶 for any given 푥푥. Though it is not easy to create such a circuit, it is possible because we know that any classical circuit can be converted to a quantum circuit using Toffoli gates. Instead of inputting each value of 푥푥 separately, we will give all the possible values of 푥푥 to the circuit by taking 푞푞 qubits and applying H gates on all of them. The input 푞푞 qubits will be in an equal superposition of |0⟩ to |푄푄− 1 ⟩. The bottom output qubits will be in a superposition of all the possible outputs |푎푎^0 푀푀푓푓푑푑 퐶퐶⟩ to |푎푎푄푄−1 푀푀푓푓푑푑 퐶퐶⟩.\n\nThe system will be something like this (ignoring the normalization coefficients):\n|0⟩|푎푎^0 푀푀푓푓푑푑 퐶퐶⟩ + |1⟩|푎푎^1 푀푀푓푓푑푑 퐶퐶⟩ + |2⟩|푎푎^2 푀푀푓푓푑푑 퐶퐶⟩ ... |Q− 1 ⟩|푎푎푄푄−1 푀푀푓푓푑푑 퐶퐶⟩.\n\nNote that for multiple input qubit states there will be a common output state because 푎푎푥푥 푀푀푓푓푑푑 퐶퐶 is a many-to-one function. Now, if we go ahead and measure the output qubits, out of several superimposed outputs, only one of it will be obtained. Also, the input states will be affected in such a way that only those states that could result in the measured output can remain. The probabilities of all the other states will become zero.\n\n\n\nFrom Table 5.1, we can conclude that all the states that will be present in the input qubits will be separated by 푟푟. You must have noticed that we are closer to getting the answer because all we need is 푟푟 and we executed the circuit only once (exponential improvement must be obvious by now). But measuring the 푞푞 qubits will not help much because we don't have any pattern yet to exploit. 
Moreover, the first residue state might not be 0 and could be anything (called offset).\n\nNext, we do something called Inverse quantum Fourier Transform (QFT), it does the following:\n 1. It removes the offset and gives some probability for state 0\n 2. It changes the period between the states from 푟푟 to 푄푄/푟푟\n\nNow, if we measure the input 푞푞 qubits we are sure that we will get some multiple of 푄푄/푟푟 say 퐿퐿 ∗ 푄푄/푟푟. The value we measured is 푣푣, so we get the following equation:\n\n푣푣 = 퐿퐿 ∗ 푄푄/푟푟.\n\nRewritten as\n푣푣/푄푄 = 퐿퐿(1/푟푟).\n\nHere, we know the values of 푣푣 and 푄푄. We can divide them until we get the smallest fraction. Now, the denominator will either be 푟푟 or a factor of 푟푟.\n\nFurther details on QFT and the circuit implementation can be found here:\n\nhttps://en.wikipedia.org/wiki/Quantum_Fourier_transform\n\nThe actual circuit for the 푎푎푥푥푀푀 푓푓푑푑 퐶퐶 is beyond the scope of this book and a dedicated reader can get some help over here:\n\nhttps://arxiv.org/abs/quant-ph/0205095",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e79a11be5c67e9c6359b99f5f90cfad7dc4da520 | 148,743 | ipynb | Jupyter Notebook | ipynb/Riddler Battle Royale.ipynb | mikiec84/pytudes | 895c7130397c1763ee8c9f87b8ea1ccc1ccbd447 | [
"MIT"
] | null | null | null | ipynb/Riddler Battle Royale.ipynb | mikiec84/pytudes | 895c7130397c1763ee8c9f87b8ea1ccc1ccbd447 | [
"MIT"
] | null | null | null | ipynb/Riddler Battle Royale.ipynb | mikiec84/pytudes | 895c7130397c1763ee8c9f87b8ea1ccc1ccbd447 | [
"MIT"
] | null | null | null | 171.165708 | 47,274 | 0.853129 | [
[
[
"# Riddler Battle Royale\n\n\n\n> [538's *The Riddler* Asks](http://fivethirtyeight.com/features/the-battle-for-riddler-nation-round-2/): *In a distant, war-torn land, there are 10 castles. There are two warlords: you and your archenemy, with whom you’re competing to collect the most victory points. Each castle has its own strategic value for a would-be conqueror. Specifically, the castles are worth 1, 2, 3, …, 9, and 10 victory points. You and your enemy each have 100 soldiers to distribute, any way you like, to fight at any of the 10 castles. Whoever sends more soldiers to a given castle conquers that castle and wins its victory points. If you each send the same number of troops, you split the points. You don’t know what distribution of forces your enemy has chosen until the battles begin. Whoever wins the most points wins the war. Submit a plan distributing your 100 soldiers among the 10 castles.*\n\n",
"_____no_output_____"
]
],
[
[
"# Load some useful modules\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport csv\nimport random\nfrom collections import Counter\nfrom statistics import mean",
"_____no_output_____"
]
],
[
[
"Let's play with this and see if we can find a good solution. Some implementation choices:\n* A `Plan` will be a tuple of 10 soldier counts (one for each castle).\n* `castles` will hold the indexes of the castles. Note that index 0 is castle 1 (worth 1 point) and index 9 is castle 10 (worth 10 points).\n* `half` is half the total number of points; if you get more than this you win.\n* `plans` will hold a set of plans that were submitted in the previous contest.\n* `play(A, B)` gives the single game reward for Plan A against Plan B: 1 if A wins, 0 if A loses, and 1/2 for a tie.\n* `reward(a, b, payoff)` returns payoff, payoff/2, or 0, depending on whether `a` is bigger than `b`.",
"_____no_output_____"
]
],
[
[
"Plan = tuple \ncastles = range(10)\nhalf = 55/2 \nplans = {Plan(map(int, row[:10])) \n for row in csv.reader(open('battle_royale.csv'))}\n\ndef play(A, B): \n \"Play Plan A against Plan B and return a reward (0, 1/2, or 1).\"\n A_points = sum(reward(A[c], B[c], c + 1) for c in castles)\n return reward(A_points, half) \n\ndef reward(a, b, payoff=1): return (payoff if a > b else payoff / 2 if a == b else 0) ",
"_____no_output_____"
]
],
[
[
"Some tests:",
"_____no_output_____"
]
],
[
[
"assert reward(6, 5, 9) == 9 # 6 soldiers defeat 5, winning all 9 of the castle's points\nassert reward(6, 6, 8) == 4 # A tie on an 8-point castle is worth 4 points\nassert reward(6, 7, 7) == 0 # No points for a loss\nassert reward(30, 25) == 1 # 30 victory points beats 25\n\nassert len(plans) == 1202\n\nassert play((26, 5, 5, 5, 6, 7, 26, 0, 0, 0),\n (25, 0, 0, 0, 0, 0, 0, 25, 25, 25)) == 1 # A wins game\n\nassert play((26, 5, 5, 5, 6, 7, 26, 0, 0, 0),\n (0, 25, 0, 0, 0, 0, 0, 25, 25, 25)) == 0 # B wins game\n\nassert play((25, 5, 5, 5, 6, 7, 26, 0, 0, 0),\n (25, 0, 0, 0, 0, 0, 0, 25, 25, 25)) == 1/2 # Tie game",
"_____no_output_____"
]
],
[
[
"Let's run a tournament, playing each plan against every other, and returning a list of `[(plan, mean_game_points),...]`. I will also define `show` to pretty-print these results and display a histogram:",
"_____no_output_____"
]
],
[
[
"def tournament(plans):\n \"Play each plan against each other; return a sorted list of [(plan: mean_points)]\"\n rankdict = {A: mean_points(A, plans) for A in plans}\n return Counter(rankdict).most_common()\n\ndef mean_points(A, opponents): \n \"Mean points for A playing against all opponents (but not against itself).\"\n return mean(play(A, B) for B in opponents if B is not A)\n\ndef show(rankings, n=10): \n \"Pretty-print the n best plans, and display a histogram of all plans.\"\n print('Top', n, 'of', len(rankings), 'plans:')\n for (plan, points) in rankings[:n]:\n print(pplan(plan), pct(points))\n plt.hist([s for (p, s) in rankings], bins=20)\n \ndef pct(x): return '{:6.1%}'.format(x)\ndef pplan(plan): return '(' + ', '.join('{:2}'.format(c) for c in plan) + ')'",
"_____no_output_____"
],
[
"# This is what the result of a tournament looks like:\ntournament({(26, 5, 5, 5, 6, 7, 26, 0, 0, 0),\n (25, 0, 0, 0, 0, 0, 0, 25, 25, 25),\n (0, 25, 0, 0, 0, 0, 0, 25, 25, 25)})",
"_____no_output_____"
],
[
"# A tournament with all 1202 plans:\nrankings = tournament(plans)\nshow(rankings)",
"Top 10 of 1202 plans:\n( 0, 3, 4, 7, 16, 24, 4, 34, 4, 4) 85.6%\n( 5, 7, 9, 11, 15, 21, 25, 2, 2, 3) 84.1%\n( 3, 5, 8, 10, 13, 1, 26, 30, 2, 2) 83.3%\n( 2, 2, 6, 12, 2, 18, 24, 30, 2, 2) 83.3%\n( 2, 8, 2, 2, 10, 18, 26, 26, 3, 3) 83.2%\n( 3, 6, 7, 9, 11, 2, 27, 31, 2, 2) 83.2%\n( 1, 1, 1, 5, 11, 16, 28, 29, 3, 5) 82.8%\n( 1, 3, 1, 1, 17, 20, 21, 30, 3, 3) 82.6%\n( 3, 6, 10, 12, 16, 21, 26, 2, 2, 2) 82.4%\n( 6, 6, 6, 11, 20, 21, 21, 3, 3, 3) 82.2%\n"
]
],
[
[
"It looks like there are a few really bad plans in there. Let's just keep the top 1000 plans (out of 1202), and re-run the rankings:",
"_____no_output_____"
]
],
[
[
"plans = {A for (A, _) in rankings[:1000]}\nrankings = tournament(plans)\nshow(rankings)",
"Top 10 of 1000 plans:\n( 0, 3, 4, 7, 16, 24, 4, 34, 4, 4) 87.4%\n( 5, 5, 5, 5, 5, 5, 27, 30, 6, 7) 84.8%\n( 5, 5, 5, 5, 5, 5, 30, 30, 5, 5) 84.2%\n( 3, 3, 5, 5, 7, 7, 30, 30, 5, 5) 84.1%\n( 1, 2, 3, 4, 6, 16, 25, 33, 4, 6) 82.5%\n( 2, 2, 2, 5, 5, 26, 26, 26, 3, 3) 82.4%\n( 1, 1, 1, 5, 11, 16, 28, 29, 3, 5) 82.0%\n( 0, 1, 3, 3, 11, 18, 25, 33, 3, 3) 82.0%\n( 5, 7, 9, 11, 15, 21, 25, 2, 2, 3) 81.7%\n( 0, 0, 5, 5, 25, 3, 25, 3, 31, 3) 81.5%\n"
]
],
[
[
"The top 10 plans are still winning over 80%, and the top plan remains `(0, 3, 4, 7, 16, 24, 4, 34, 4, 4)`. This is an interesting plan: it places most of the soldiers on castles 4+5+6+8, which totals only 23 points, so it needs to pick up 5 more points from the other castles (that have mostly 4 soldiers attacking each one). Is this a good strategy? Where should we optiomally allocate soldiers? \n\nTo gain some insight, I'll create a plot with 10 curves, one for each castle. Each curve maps the number of soldiers sent to the castle (on the x-axis) to the expected points won (against the 1000 plans) on the y-axis:\n",
"_____no_output_____"
]
],
[
[
"def plotter(plans, X=range(41)):\n X = list(X)\n def mean_reward(c, s): return mean(reward(s, p[c], c+1) for p in plans)\n for c in range(10):\n plt.plot(X, [mean_reward(c, s) for s in X], '.-')\n plt.xlabel('Number of soldiers (on each of the ten castles)')\n plt.ylabel('Expected points won')\n plt.grid()\n \nplotter(plans)",
"_____no_output_____"
]
],
[
[
"For example, this says that for castle 10 (the orange line at top), there is a big gain in expected return as we increase from 0 to 4 soldiers, and after that the gains are relatively less steep. This plot is interesting, but I can't see how to directly read off a best plan from it.\n\n## Hillclimbing\n\nInstead I'll see if I can improve the existing plans, using a simple *hillclimbing* strategy: Take a Plan A, and change it by randomly moving some soldiers from one castle to another. If that yields more `mean_points`, then keep the updated plan, otherwise discard it. Repeat.",
"_____no_output_____"
]
],
[
[
"def hillclimb(A, plans=plans, steps=1000):\n \"Try to improve Plan A, repeat `steps` times; return new plan and total.\"\n m = mean_points(A, plans)\n for _ in range(steps):\n B = mutate(A)\n m, A = max((m, A), \n (mean_points(B, plans), B))\n return A, m\n\ndef mutate(plan):\n \"Return a new plan that is a slight mutation.\"\n plan = list(plan) # So we can modify it.\n i, j = random.sample(castles, 2)\n plan[i], plan[j] = random_split(plan[i] + plan[j])\n return Plan(plan)\n\ndef random_split(n):\n \"Split the integer n into two integers that sum to n.\"\n r = random.randint(0, n)\n return r, n - r",
"_____no_output_____"
]
],
[
[
"Let's see how well this works. Remember, the best plan so far had a score of `87.4%`. Can we improve on that?",
"_____no_output_____"
]
],
[
[
"hillclimb((0, 3, 4, 7, 16, 24, 4, 34, 4, 4))",
"_____no_output_____"
]
],
[
[
"We got an improvement. Let's see what happens if we start with other plans:",
"_____no_output_____"
]
],
[
[
"hillclimb((10, 10, 10, 10, 10, 10, 10, 10, 10, 10))",
"_____no_output_____"
],
[
"hillclimb((0, 1, 2, 3, 4, 18, 18, 18, 18, 18))",
"_____no_output_____"
],
[
"hillclimb((2, 3, 5, 5, 5, 20, 20, 20, 10, 10))",
"_____no_output_____"
],
[
"hillclimb((0, 0, 5, 5, 25, 3, 25, 3, 31, 3))",
"_____no_output_____"
]
],
[
[
"What if we hillclimb 20 times longer?",
"_____no_output_____"
]
],
[
[
"hillclimb((0, 3, 4, 7, 16, 24, 4, 34, 4, 4), steps=20000)",
"_____no_output_____"
]
],
[
[
"## Opponent modelling\n\nTo have a chance of winning the second round of this contest, we have to predict what the other entries will be like. Nobody knows for sure, but I can hypothesize that the entries will be slightly better than the first round, and try to approximate that by hillclimbing from each of the first-round plans for a small number of steps:",
"_____no_output_____"
]
],
[
[
"def hillclimbers(plans, steps=100):\n \"Return a sorted list of [(improved_plan, mean_points), ...]\"\n pairs = {hillclimb(plan, plans, steps) for plan in plans}\n return sorted(pairs, key=lambda pair: pair[1], reverse=True)",
"_____no_output_____"
],
[
"# For example:\nhillclimbers({(26, 5, 5, 5, 6, 7, 26, 0, 0, 0),\n (25, 0, 0, 0, 0, 0, 0, 25, 25, 25),\n (0, 25, 0, 0, 0, 0, 0, 25, 25, 25)})",
"_____no_output_____"
]
],
[
[
"I will define `plans2` (and `rankings2`) to be my estimate of the entries for round 2:",
"_____no_output_____"
]
],
[
[
"%time rankings2 = hillclimbers(plans)\nplans2 = {A for (A, _) in rankings2}\nshow(rankings2)",
"CPU times: user 6min 11s, sys: 3.21 s, total: 6min 14s\nWall time: 6min 17s\nTop 10 of 1000 plans:\n( 1, 4, 5, 15, 6, 21, 3, 31, 3, 11) 90.8%\n( 0, 3, 5, 14, 7, 21, 3, 30, 4, 13) 90.6%\n( 0, 4, 6, 15, 9, 21, 4, 31, 5, 5) 90.2%\n( 2, 4, 3, 13, 5, 22, 3, 32, 4, 12) 90.1%\n( 0, 3, 5, 15, 8, 21, 4, 32, 6, 6) 90.0%\n( 0, 3, 5, 15, 6, 24, 3, 31, 5, 8) 90.0%\n( 0, 3, 6, 13, 6, 21, 5, 30, 4, 12) 90.0%\n( 3, 4, 5, 15, 7, 21, 2, 31, 6, 6) 89.9%\n( 2, 3, 3, 13, 6, 21, 3, 30, 5, 14) 89.8%\n( 0, 2, 2, 12, 2, 23, 4, 31, 3, 21) 89.8%\n"
]
],
[
[
"Even though we only took 100 steps, the `plans2` plans are greatly improved: Almost all of them defeat 75% or more of the first-round `plans`. The top 10 plans are all very similar, targeting castles 4+6+8+10 (for 28 points), but reserving 20 or soldiers to spread among the other castles. Let's look more carefully at every 40th plan, plus the last one:",
"_____no_output_____"
]
],
[
[
"for (p, m) in rankings2[::40] + [rankings2[-1]]:\n print(pplan(p), pct(m))",
"( 1, 4, 5, 15, 6, 21, 3, 31, 3, 11) 90.8%\n( 0, 6, 3, 13, 3, 22, 2, 32, 4, 15) 88.9%\n( 1, 3, 6, 13, 9, 22, 1, 30, 4, 11) 88.3%\n( 2, 2, 1, 13, 3, 21, 2, 32, 3, 21) 87.9%\n( 0, 2, 5, 5, 15, 2, 28, 31, 5, 7) 87.6%\n( 2, 2, 4, 14, 9, 1, 27, 30, 6, 5) 87.3%\n( 3, 2, 3, 12, 3, 28, 3, 32, 6, 8) 87.0%\n( 1, 3, 2, 5, 18, 3, 26, 3, 33, 6) 86.7%\n( 0, 4, 4, 6, 15, 3, 29, 30, 5, 4) 86.5%\n( 5, 5, 4, 5, 13, 22, 2, 29, 3, 12) 86.2%\n( 5, 6, 5, 6, 16, 24, 26, 1, 5, 6) 85.9%\n( 0, 2, 5, 15, 8, 3, 20, 36, 6, 5) 85.7%\n( 5, 1, 6, 12, 2, 24, 5, 32, 4, 9) 85.4%\n( 2, 5, 8, 16, 11, 3, 2, 36, 5, 12) 85.1%\n( 2, 7, 3, 15, 14, 2, 3, 31, 9, 12) 84.8%\n( 6, 5, 8, 6, 7, 22, 30, 3, 7, 6) 84.6%\n( 5, 3, 3, 5, 3, 21, 26, 26, 3, 5) 84.4%\n( 0, 2, 4, 13, 2, 22, 17, 33, 2, 5) 84.0%\n( 0, 7, 12, 6, 8, 21, 2, 29, 12, 3) 83.5%\n( 5, 5, 4, 13, 18, 2, 26, 2, 6, 19) 83.0%\n( 5, 6, 3, 15, 17, 24, 4, 2, 5, 19) 82.5%\n( 5, 6, 5, 9, 6, 22, 34, 1, 7, 5) 81.8%\n( 4, 3, 7, 17, 17, 22, 3, 3, 5, 19) 81.0%\n( 0, 1, 2, 11, 12, 13, 28, 27, 2, 4) 80.4%\n( 5, 6, 13, 16, 15, 26, 2, 4, 7, 6) 78.9%\n( 0, 0, 1, 13, 0, 1, 24, 21, 36, 4) 70.3%\n"
]
],
[
[
"We see a wider variety in plans as we go farther down the rankings. Now for the plot:",
"_____no_output_____"
]
],
[
[
"plotter(plans2)",
"_____no_output_____"
]
],
[
[
"We see that many castles (e.g. 9 (green), 8 (blue), 7 (black), 6 (yellowish)) have two plateaus. Castle 7 (black) has a plateau at 3.5 points for 6 to 20 soldiers (suggesting that 6 soldiers is a good investment and 20 soldiers a bad investment), and then another plateau at 7 points for everything above 30 soldiers.\n\nNow that we have an estimate of the opponents, we can use `hillclimbers` to try to find a plan that does well against all the others:",
"_____no_output_____"
]
],
[
[
"%time rankings3 = hillclimbers(plans2)\nshow(rankings3)",
"CPU times: user 5min 40s, sys: 1 s, total: 5min 41s\nWall time: 5min 42s\nTop 10 of 1000 plans:\n( 3, 8, 10, 18, 21, 3, 5, 6, 10, 16) 99.9%\n( 1, 9, 10, 17, 21, 6, 4, 6, 9, 17) 99.9%\n( 1, 8, 10, 18, 21, 4, 4, 6, 11, 17) 99.9%\n( 0, 10, 10, 17, 20, 4, 5, 6, 7, 21) 99.9%\n( 2, 11, 1, 16, 18, 7, 6, 6, 8, 25) 99.8%\n( 1, 8, 11, 19, 20, 4, 6, 5, 7, 19) 99.8%\n( 0, 1, 11, 15, 18, 7, 6, 5, 13, 24) 99.8%\n( 2, 10, 1, 17, 18, 9, 5, 6, 8, 24) 99.8%\n( 1, 9, 10, 17, 19, 4, 6, 6, 9, 19) 99.8%\n( 0, 2, 11, 18, 21, 4, 6, 8, 8, 22) 99.8%\n"
]
],
[
[
"We can try even harder to improve the champ:",
"_____no_output_____"
]
],
[
[
"champ, _ = rankings3[0]\nhillclimb(champ, plans2, 10000)",
"_____no_output_____"
]
],
[
[
"Here are some champion plans from previous runs of this notebook:",
"_____no_output_____"
]
],
[
[
"champs = {\n (0, 1, 3, 16, 20, 3, 4, 5, 32, 16),\n (0, 1, 9, 16, 15, 24, 5, 5, 8, 17),\n (0, 1, 9, 16, 16, 24, 5, 5, 7, 17),\n (0, 2, 9, 16, 15, 24, 5, 5, 8, 16),\n (0, 2, 9, 16, 15, 25, 5, 4, 7, 17),\n (0, 3, 4, 7, 16, 24, 4, 34, 4, 4),\n (0, 3, 5, 6, 20, 4, 4, 33, 8, 17),\n (0, 4, 5, 7, 20, 4, 4, 33, 7, 16),\n (0, 4, 6, 7, 19, 4, 4, 31, 8, 17),\n (0, 4, 12, 18, 21, 7, 6, 4, 8, 20),\n (0, 4, 12, 19, 25, 4, 5, 6, 8, 17),\n (0, 5, 6, 7, 18, 4, 5, 32, 7, 16),\n (0, 5, 7, 3, 18, 4, 4, 34, 8, 17),\n (1, 2, 9, 16, 15, 24, 5, 4, 7, 17),\n (1, 2, 9, 16, 15, 24, 5, 4, 8, 16),\n (1, 2, 11, 16, 15, 24, 5, 4, 7, 15),\n (1, 3, 14, 18, 24, 4, 5, 6, 8, 17),\n (1, 6, 3, 16, 16, 24, 5, 5, 7, 17),\n (2, 3, 7, 16, 16, 25, 5, 5, 8, 13),\n (2, 3, 8, 16, 12, 25, 5, 4, 8, 17),\n (2, 3, 8, 16, 15, 24, 5, 4, 7, 16),\n (2, 3, 8, 16, 15, 25, 4, 5, 8, 14),\n (2, 3, 8, 16, 16, 24, 5, 5, 8, 13),\n (2, 3, 9, 15, 12, 25, 4, 5, 8, 17),\n (2, 3, 9, 16, 12, 24, 5, 5, 8, 16),\n (2, 4, 12, 18, 24, 4, 6, 5, 8, 17),\n (3, 3, 7, 16, 16, 24, 5, 5, 8, 13),\n (3, 3, 8, 16, 12, 25, 4, 4, 8, 17),\n (3, 3, 8, 16, 15, 25, 5, 4, 7, 14),\n (3, 4, 12, 18, 23, 4, 6, 5, 8, 17),\n (3, 4, 15, 18, 23, 4, 5, 6, 8, 14),\n (3, 5, 7, 16, 5, 4, 5, 34, 7, 14),\n (3, 6, 13, 17, 23, 4, 6, 5, 8, 15),\n (4, 3, 12, 18, 23, 4, 5, 6, 8, 17),\n (4, 5, 3, 15, 11, 23, 5, 5, 10, 19),\n (4, 6, 3, 16, 14, 25, 5, 5, 8, 14),\n (4, 6, 3, 16, 16, 24, 5, 5, 7, 14),\n (4, 6, 3, 16, 16, 24, 5, 5, 8, 13),\n (5, 3, 12, 17, 23, 4, 5, 6, 8, 17),\n (5, 5, 3, 16, 12, 25, 4, 5, 8, 17),\n (5, 6, 3, 16, 16, 24, 5, 5, 7, 13),\n (5, 6, 7, 3, 21, 4, 27, 5, 8, 14),\n (5, 6, 8, 3, 18, 4, 27, 5, 8, 16),\n (5, 6, 8, 3, 20, 4, 27, 5, 8, 14),\n (5, 6, 8, 3, 21, 4, 27, 5, 8, 13)}",
"_____no_output_____"
]
],
[
[
"We can evaluate each of them against the original `plans`, against the improved `plans2`, against their fellow champs, and against all of those put together:",
"_____no_output_____"
]
],
[
[
"def μ(plan, plans): return pct(mean_points(plan,plans))\nall = plans | plans2 | champs\n\nprint('Plan plans plans2 champs all')\nfor p in sorted(champs, key=lambda p: -mean_points(p, all)):\n print(pplan(p), μ(p, plans), μ(p, plans2), μ(p, champs), μ(p, all))",
"Plan plans plans2 champs all\n( 0, 5, 7, 3, 18, 4, 4, 34, 8, 17) 85.5% 96.0% 68.5% 90.2%\n( 0, 4, 6, 7, 19, 4, 4, 31, 8, 17) 84.7% 95.0% 63.0% 89.2%\n( 0, 1, 3, 16, 20, 3, 4, 5, 32, 16) 85.6% 95.2% 31.5% 89.0%\n( 0, 3, 5, 6, 20, 4, 4, 33, 8, 17) 84.1% 95.2% 60.9% 89.0%\n( 0, 5, 6, 7, 18, 4, 5, 32, 7, 16) 84.3% 96.3% 28.3% 88.9%\n( 3, 5, 7, 16, 5, 4, 5, 34, 7, 14) 85.2% 95.7% 18.5% 88.8%\n( 5, 6, 8, 3, 18, 4, 27, 5, 8, 16) 81.8% 96.4% 64.1% 88.6%\n( 0, 4, 5, 7, 20, 4, 4, 33, 7, 16) 84.7% 95.0% 18.5% 88.2%\n( 5, 6, 8, 3, 20, 4, 27, 5, 8, 14) 82.0% 96.2% 48.9% 88.2%\n( 0, 1, 9, 16, 15, 24, 5, 5, 8, 17) 78.2% 98.6% 72.8% 88.0%\n( 5, 6, 7, 3, 21, 4, 27, 5, 8, 14) 81.8% 96.0% 51.1% 88.0%\n( 0, 1, 9, 16, 16, 24, 5, 5, 7, 17) 79.1% 98.5% 46.7% 87.8%\n( 5, 6, 8, 3, 21, 4, 27, 5, 8, 13) 82.0% 95.2% 45.7% 87.6%\n( 2, 3, 9, 15, 12, 25, 4, 5, 8, 17) 78.5% 97.9% 58.7% 87.5%\n( 4, 5, 3, 15, 11, 23, 5, 5, 10, 19) 76.8% 97.8% 97.8% 87.5%\n( 2, 3, 8, 16, 12, 25, 5, 4, 8, 17) 78.2% 98.1% 57.6% 87.5%\n( 2, 3, 8, 16, 15, 25, 4, 5, 8, 14) 79.7% 97.9% 31.5% 87.5%\n( 0, 2, 9, 16, 15, 24, 5, 5, 8, 16) 78.5% 98.2% 50.0% 87.4%\n( 2, 3, 7, 16, 16, 25, 5, 5, 8, 13) 79.3% 97.5% 44.6% 87.4%\n( 4, 6, 3, 16, 14, 25, 5, 5, 8, 14) 79.0% 97.4% 48.9% 87.3%\n( 5, 5, 3, 16, 12, 25, 4, 5, 8, 17) 78.1% 97.8% 60.9% 87.3%\n( 4, 6, 3, 16, 16, 24, 5, 5, 7, 14) 80.3% 97.2% 21.7% 87.3%\n( 2, 3, 8, 16, 15, 24, 5, 4, 7, 16) 80.2% 97.8% 10.9% 87.2%\n( 1, 2, 9, 16, 15, 24, 5, 4, 7, 17) 79.8% 97.8% 19.6% 87.2%\n( 0, 2, 9, 16, 15, 25, 5, 4, 7, 17) 79.1% 97.9% 31.5% 87.2%\n( 2, 3, 8, 16, 16, 24, 5, 5, 8, 13) 79.5% 97.5% 29.3% 87.2%\n( 2, 3, 9, 16, 12, 24, 5, 5, 8, 16) 78.0% 98.2% 45.7% 87.1%\n( 3, 3, 8, 16, 15, 25, 5, 4, 7, 14) 80.3% 97.6% 6.5% 87.1%\n( 1, 2, 9, 16, 15, 24, 5, 4, 8, 16) 79.2% 97.6% 27.2% 87.0%\n( 3, 3, 7, 16, 16, 24, 5, 5, 8, 13) 79.8% 97.1% 26.1% 87.0%\n( 4, 6, 3, 16, 16, 24, 5, 5, 8, 13) 80.0% 96.7% 28.3% 87.0%\n( 3, 3, 8, 16, 12, 25, 4, 4, 8, 17) 78.8% 97.8% 28.3% 87.0%\n( 5, 6, 3, 16, 16, 24, 5, 
5, 7, 13) 80.9% 96.5% 8.7% 86.9%\n( 1, 2, 11, 16, 15, 24, 5, 4, 7, 15) 79.9% 97.2% 6.5% 86.7%\n( 1, 6, 3, 16, 16, 24, 5, 5, 7, 17) 75.2% 97.9% 41.3% 85.5%\n( 5, 3, 12, 17, 23, 4, 5, 6, 8, 17) 64.3% 99.5% 84.8% 82.0%\n( 4, 3, 12, 18, 23, 4, 5, 6, 8, 17) 64.0% 99.5% 88.0% 81.9%\n( 3, 4, 12, 18, 23, 4, 6, 5, 8, 17) 63.2% 99.5% 88.0% 81.5%\n( 2, 4, 12, 18, 24, 4, 6, 5, 8, 17) 63.0% 99.5% 91.3% 81.5%\n( 3, 4, 15, 18, 23, 4, 5, 6, 8, 14) 63.4% 99.5% 76.1% 81.3%\n( 3, 6, 13, 17, 23, 4, 6, 5, 8, 15) 63.2% 99.4% 78.3% 81.2%\n( 1, 3, 14, 18, 24, 4, 5, 6, 8, 17) 62.4% 99.5% 93.5% 81.2%\n( 0, 4, 12, 19, 25, 4, 5, 6, 8, 17) 62.1% 99.5% 95.7% 81.1%\n( 1, 9, 11, 17, 21, 6, 5, 4, 10, 16) 62.1% 100.0% 76.1% 80.9%\n( 0, 4, 12, 18, 21, 7, 6, 4, 8, 20) 61.4% 99.6% 100.0% 80.9%\n( 1, 7, 13, 17, 23, 6, 6, 5, 8, 14) 62.4% 99.5% 78.3% 80.9%\n( 0, 3, 4, 7, 16, 24, 4, 34, 4, 4) 87.4% 37.6% 0.0% 61.1%\n"
]
],
[
[
"Which plan is best? In the end, we don't know, because we don't know the pool we will be competing against.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e79a1cfd115e3122839d2e72ea933472c06bcadb | 2,819 | ipynb | Jupyter Notebook | nesting_functions_accessing_variable_scope.ipynb | MarcusDMelv/Python-Methods-and-Functions | 0ed46d50695687faf5b80de8da0e3666a2074287 | [
"Unlicense"
] | null | null | null | nesting_functions_accessing_variable_scope.ipynb | MarcusDMelv/Python-Methods-and-Functions | 0ed46d50695687faf5b80de8da0e3666a2074287 | [
"Unlicense"
] | null | null | null | nesting_functions_accessing_variable_scope.ipynb | MarcusDMelv/Python-Methods-and-Functions | 0ed46d50695687faf5b80de8da0e3666a2074287 | [
"Unlicense"
] | null | null | null | 24.72807 | 413 | 0.538134 | [
[
[
"# a nested function access the outer scope of the enclosing function - This is referred to as closure",
"_____no_output_____"
],
[
"def display_message(message):\n print('Hello')\n def message_sender():\n print('Nested function')\n print(message)\n message_sender()\ndisplay_message('Show me the money')",
"Hello\nNested function\nShow me the money\n"
],
[
"display_message('Roger')",
"Nested function\nRoger\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e79a28466080fccbd768c6e4c92e3652337a31cb | 554,963 | ipynb | Jupyter Notebook | analysis/Jamie/milestones2_EDA.ipynb | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-02-09T02:13:23.000Z | 2021-02-09T02:13:23.000Z | analysis/Jamie/milestones2_EDA.ipynb | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 31 | 2021-02-02T17:03:39.000Z | 2021-04-13T03:22:16.000Z | analysis/Jamie/milestones2_EDA.ipynb | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-03-14T05:56:16.000Z | 2021-03-14T05:56:16.000Z | 497.278674 | 86,627 | 0.741745 | [
[
[
"# Individual EDA\n\n- Separate the states into 4 regions: Western, southern, eastern and northern.\n- Filter data based on assigned regions and explore with support from visualization\n- North East and South is the main focus in this EDA.\n\n___",
"_____no_output_____"
],
[
"## Data Filtering",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Add scripts module's directory to sys.path\nimport sys, os\nsys.path.append(os.path.join(os.getcwd(),\"..\"))\nfrom scripts import project_functions as pf",
"_____no_output_____"
],
[
"# Load 4 parts of raw data on State Names\nstate_df = pf.load_and_process_many(\"../../data/raw/state\")\n\n# Note that the project_fuctions module includes list of abbreviations for states separated in regions\n# Let's slice out only the north east and south\nn_df = state_df.loc[state_df[\"State\"].isin(pf.NORTH_EAST)].reset_index(drop=True)\ns_df = state_df.loc[state_df[\"State\"].isin(pf.SOUTH)].reset_index(drop=True)\n",
"_____no_output_____"
]
],
[
[
"___",
"_____no_output_____"
],
[
"## Initial inspection\n\nLet's have a general of the data set for each region.",
"_____no_output_____"
],
[
"### North East region",
"_____no_output_____"
]
],
[
[
"n_df.head(10)",
"_____no_output_____"
],
[
"n_df.shape",
"_____no_output_____"
]
],
[
[
"For the North East, we see that there are **more than 1 million collected record** and **5 variable for each observation**. ",
"_____no_output_____"
]
],
[
[
"n_df.columns",
"_____no_output_____"
]
],
[
[
"Indeed, we have 5 variables for each observation. **The state column is not important since we care only about regions.**",
"_____no_output_____"
]
],
[
[
"n_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1077888 entries, 0 to 1077887\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Name 1077888 non-null object\n 1 Year 1077888 non-null int64 \n 2 Gender 1077888 non-null object\n 3 State 1077888 non-null object\n 4 Count 1077888 non-null int64 \ndtypes: int64(2), object(3)\nmemory usage: 41.1+ MB\n"
]
],
[
[
"We see Year and Count are 64-bit integers type while other columns are categorial types.",
"_____no_output_____"
]
],
[
[
"n_df.describe(include=[object]).T",
"_____no_output_____"
]
],
[
[
"For categorial data:\n\n- We see that there are 3 categorical variable in the dataframe with other 2 numerical variable (Year and Count)\n- Here, we can see that are 15817 unique names in this region\n- There are 11 states recorded that equal to total number of states in this region. This means all states participates in this survey.\n- It is not clear whether John is the most popular all of times since we also have a count column",
"_____no_output_____"
]
],
[
[
"n_df.describe().T",
"_____no_output_____"
]
],
[
[
"Summary on numerical values do not give any useful information. ",
"_____no_output_____"
]
],
[
[
"n_df[\"Year\"].unique()",
"_____no_output_____"
]
],
[
[
"The data set span from 1910 to 2014 without any missing years.",
"_____no_output_____"
]
],
[
[
"len(n_df.loc[n_df[\"Count\"]<=0])",
"_____no_output_____"
]
],
[
[
"This shows that we do not have negative values for names'count.",
"_____no_output_____"
],
[
"### South region",
"_____no_output_____"
]
],
[
[
"s_df.head(10)",
"_____no_output_____"
],
[
"s_df.shape",
"_____no_output_____"
]
],
[
[
"For the South, we see that there are **more than 2 million collected record** and **5 variable for each observation**.",
"_____no_output_____"
]
],
[
[
"# We have 5 variables for each observation\ns_df.columns",
"_____no_output_____"
]
],
[
[
"This is similar to that of North East region.",
"_____no_output_____"
]
],
[
[
"s_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2173021 entries, 0 to 2173020\nData columns (total 5 columns):\n # Column Dtype \n--- ------ ----- \n 0 Name object\n 1 Year int64 \n 2 Gender object\n 3 State object\n 4 Count int64 \ndtypes: int64(2), object(3)\nmemory usage: 82.9+ MB\n"
]
],
[
[
"The type of each column is also similar to that of North East dataset.",
"_____no_output_____"
]
],
[
[
"s_df.describe(include=[object]).T",
"_____no_output_____"
]
],
[
[
"For categorial data:\n\n- We see that there are 3 categorical variable in the dataframe with other 2 numerical variable (Year and Count)\n- Here, we can see that are 20860 unique names in this region\n- There are 17 states recorded that equal to total number of states in this region. This means all states participates in this survey\n- It is not clear whether Jessie is the most popular all of times since we also have a count column",
"_____no_output_____"
]
],
[
[
"s_df.describe().T",
"_____no_output_____"
]
],
[
[
"Summary on numerical values do not give any useful information.\n",
"_____no_output_____"
]
],
[
[
"s_df.loc[s_df[\"Count\"]<=0]",
"_____no_output_____"
]
],
[
[
"This shows that we do not have negative values for names'count.",
"_____no_output_____"
]
],
[
[
"s_df[\"Year\"].unique()",
"_____no_output_____"
]
],
[
[
"The data set also spans from 1910 to 2014 without gaps!",
"_____no_output_____"
],
[
"___\n## Analysis\n### Top 5 of all times in South and North",
"_____no_output_____"
],
[
"\nWe start by aggregating sum of counts of every name in each region for all years.",
"_____no_output_____"
]
],
[
[
"# Define processing function\ndef get_top5_all_time(data=None):\n if data is None:\n return data\n return (data.groupby(by=\"Name\")\n .aggregate(\"sum\")\n .drop(columns=[\"Year\"]) # We do not analyze with time\n .reset_index()\n .sort_values(by=\"Count\", ascending=False)\n .head()\n )\n# For the north east\ntop5_n = get_top5_all_time(n_df)\n# For the south\ntop5_s = get_top5_all_time(s_df)",
"_____no_output_____"
],
[
"top5_n",
"_____no_output_____"
],
[
"top5_s",
"_____no_output_____"
]
],
[
[
"The code works properly to return top 5 all times in these two regions.",
"_____no_output_____"
],
[
"Now, we can build plots. In this case, for counting the number of occurence for each discrete entry, bar plots is ideal.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,7), sharex=True)\n# Check similarity between 2 regions\nsns.set_theme(context=\"notebook\", style=\"ticks\", font_scale=1.3)\n\ndef get_top5_all_time(data, ax, region):\n plot = sns.barplot(y=\"Name\",\n x=\"Count\",\n data=data,\n order=data[\"Name\"],\n ax=ax\n )\n plot.set_title(f\"{region} Top 5 Name All Time\")\n return plot\n# North graph\nnorth = get_top5_all_time(top5_n , ax[0], \"North East\")\n\n# South graph\nsouth = get_top5_all_time(top5_s, ax[1], \"South\")\n\n# Show plot\nsns.despine()\nfig.tight_layout(pad=3.0)\nfig.suptitle(\"Top 5 names of all times in North East and South Region\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Observations\n\n- We can see that top 5 in these 2 regions are quite similar with the appearance of **James, William, Robert and John**. The difference is that **Michael** is in top 5 in the North East while **Mary** is in the top 5 in the South.\n\n- All names in top 5 list in both region pass the mark of **1 million** count of all time. Maximum count in the *North East* is almost **1.6 millions** while that of the *South* surpasses **2 millions**.\n\n- In the **North East** region, **John is the most popular name all times**, followed by Robert. James is at the last of the list.\n\n- In the **South region**, however, **James appears to the most popular name of all times**, followed by John, who takes the top in the South.\n___\n",
"_____no_output_____"
],
[
"### Top 5 of all times of each gender South and North",
"_____no_output_____"
],
[
"We start by filtering out data set based on gender.",
"_____no_output_____"
]
],
[
[
"# Function for filter data based on gender\ndef get_top5_gender(data, region, gender):\n return (data.loc[data[\"Gender\"] == gender]\n .groupby(by=\"Name\")\n .agg(np.sum)\n .sort_values(by=\"Count\", ascending=False)\n .head()\n .drop(columns=\"Year\") # We do not care about year\n .assign(Region=region, Gender=gender)\n .reset_index()\n )\n\n# In the North East\ntop5_male_n, top5_female_n = (get_top5_gender(n_df, \"NE\", \"M\"), \n get_top5_gender(n_df, \"NE\", \"F\")\n )\n\n# In the South\ntop5_male_s, top5_female_s = (get_top5_gender(s_df, \"S\", \"M\"), \n get_top5_gender(s_df, \"S\", \"F\")\n )\n",
"_____no_output_____"
]
],
[
[
"Now, we can plot. In this case, we will will bar plot to indicate counts and FacetGrid as way to categorize plot based on region and gender.",
"_____no_output_____"
]
],
[
[
"# Settings\nsns.set_theme(style=\"ticks\", font_scale=1.3)\nfig,ax= plt.subplots(1,2, figsize=(12,7), sharex=True)\n\ndef draw_gender_plot(axes, data_list, result_axes=None):\n if result_axes is None:\n result_axes = list()\n for i in range(len(ax)):\n data = data_list[i]\n ax_ij = sns.barplot(x=\"Count\",\n y=\"Name\",\n data=data,\n ax=ax[i]\n )\n region, gender = data[\"Region\"][0], data[\"Gender\"][0]\n ax_ij.set_title(f\"Region = {region} | Gender = {gender}\")\n result_axes.append(ax_ij)\n \n return result_axes\n\ndraw_gender_plot(ax, [top5_male_n,top5_male_s])\n\n# Configure figure object\nsns.despine()\nfig.tight_layout(pad=2.7)\nfig.suptitle(\"Top 5 male names of all times in North East and South Region\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Observation\n\n- Interestingly, <mark>two regions have the same names in the top 5 male names all times</mark>. This might result from the fact that these two regions are close to each other.\n\n- However, the pattern is different. In the **North East**, **John is most occuring name** with over 1.5 million counts. In the **South**, **James is at top** with over 2 million counts.\n\n- Generally, **count in the South is higher than that in the North East for the top 5 male name.** ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(1,2, figsize=(12,7), sharex=True)\ndraw_gender_plot(ax,[top5_female_n,top5_female_s])\n# Configure figure object\nsns.despine()\nfig.tight_layout(pad=2.7)\nfig.suptitle(\"Top 5 female names of all times in North East and South Region\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### Observations\n\n- Even more interesting, <mark>the top 5 female names see Mary at top for both region</mark>. **Mary's count is almost double that of other names in the list.**\n\n- The two list seems similar with the appearance of Mary, Patricia, Elizabeth. Unlike male top 5, **this list differs by 2 names between two region**. In the North East, Barbara and Margaret are in the list. In the South, Linda and Betty are the list.\n\n- Generally, **count in the South is higher than that of the North East for top 5 female names**",
"_____no_output_____"
],
[
"#### Summary\n\n- There are particular difference between male and female top 5. Male top 5 has the same pattern between 2 regions. Female top 5 see the same name at the top.\n\n- The difference in count between top name in male and other names the list is not significant. However, in the female list, that difference is almost double.\n___",
"_____no_output_____"
],
[
"### Proportion of each name from 1910 to 2014",
"_____no_output_____"
]
],
[
[
"def get_proportion_df(data, region):\n p_df = (data.pivot_table(index=\"Year\",\n columns=\"Name\",\n values=\"Count\",\n aggfunc=\"sum\")\n .fillna(0)\n )\n y = data.groupby(by=\"Year\").sum()\n\n for year in range(1910,2015):\n p_df.loc[year,:] = p_df.loc[year,:]/y.loc[year,\"Count\"]\n\n l = list()\n for i in range(1, len(p_df.columns)):\n l.append(p_df.iloc[:,i])\n df = pd.DataFrame(pd.concat(l), columns=[\"Percentage\"]).reset_index()\n return df",
"_____no_output_____"
],
[
"threshold = 0.005 # Only consider percentage above this point",
"_____no_output_____"
],
[
"n_prop= get_proportion_df(n_df, \"NE\").loc[lambda df: df.Percentage > threshold].reset_index(drop=True)",
"_____no_output_____"
],
[
"n_prop",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12,8))\nsns.set_theme(style=\"whitegrid\", font_scale=1.4)\nfirst = n_prop.loc[n_prop.Year <= 1936].assign(Group=\"1910-1936\")\nsecond = n_prop.loc[(n_prop.Year > 1936) & (n_prop.Year <= 1962)].assign(Group=\"1937-1962\")\nthird = n_prop.loc[(n_prop.Year > 1962) & (n_prop.Year <= 1988)].assign(Group=\"1962-1988\")\nlast = n_prop.loc[n_prop.Year > 1988].assign(Group=\"1988-2014\")\n\nmerge = pd.concat([first, second, third, last])\nn_plot = sns.boxplot(x=merge.Group,y=merge.Percentage)\nn_plot.set(title=\"Percentage distribution of names in each year group of Norht East Region\", ylabel=\"Percentage\", xlabel=\"Year group\")\nplt.show()",
"_____no_output_____"
],
[
"s_prop= get_proportion_df(s_df, \"S\").loc[lambda df: df.Percentage > threshold].reset_index(drop=True)",
"_____no_output_____"
],
[
"s_prop",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12,8))\nsns.set_theme(style=\"whitegrid\", font_scale=1.4)\nfirst = s_prop.loc[s_prop.Year <= 1936].assign(Group=\"1910-1936\")\nsecond = s_prop.loc[(s_prop.Year > 1936) & (s_prop.Year <= 1962)].assign(Group=\"1937-1962\")\nthird = s_prop.loc[(s_prop.Year > 1962) & (s_prop.Year <= 1988)].assign(Group=\"1962-1988\")\nlast = s_prop.loc[s_prop.Year > 1988].assign(Group=\"1988-2014\")\n\nmerge = pd.concat([first, second, third, last])\nn_plot = sns.boxplot(x=merge.Group,y=merge.Percentage)\nn_plot.set(title=\"Percentage distribution of names in each year group of South Region\", ylabel=\"Percentage\", xlabel=\"Year group\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Observation\n\n- Note: We only consider data above the threshold.\n- From the plot for proportion distribution of top name in year, we can see most have a very low proportion that is mostly between 3%-4%. There is a very small portion that exceeeds 5%. However, none reaches 6%.\n- This indicates all other names are do not exceed 5%.\n- This confirms that the set of unique names is large.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79a487f40f41e5e1f7c0f6e6ecbf7b92c7105bc | 22,380 | ipynb | Jupyter Notebook | 07_train/archive/extras/pytorch/pytorch_mnist.ipynb | MarcusFra/workshop | 83f16d41f5e10f9c23242066f77a14bb61ac78d7 | [
"Apache-2.0"
] | 2,327 | 2020-03-01T09:47:34.000Z | 2021-11-25T12:38:42.000Z | 07_train/archive/extras/pytorch/pytorch_mnist.ipynb | MarcusFra/workshop | 83f16d41f5e10f9c23242066f77a14bb61ac78d7 | [
"Apache-2.0"
] | 209 | 2020-03-01T17:14:12.000Z | 2021-11-08T20:35:42.000Z | 07_train/archive/extras/pytorch/pytorch_mnist.ipynb | MarcusFra/workshop | 83f16d41f5e10f9c23242066f77a14bb61ac78d7 | [
"Apache-2.0"
] | 686 | 2020-03-03T17:24:51.000Z | 2021-11-25T23:39:12.000Z | 33.057607 | 776 | 0.6063 | [
[
[
"# PyTorch Training and Serving in SageMaker \"Script Mode\"\n\nScript mode is a training script format for PyTorch that lets you execute any PyTorch training script in SageMaker with minimal modification. The [SageMaker Python SDK](https://github.com/aws/sagemaker-python-sdk) handles transferring your script to a SageMaker training instance. On the training instance, SageMaker's native PyTorch support sets up training-related environment variables and executes your training script. In this tutorial, we use the SageMaker Python SDK to launch a training job and deploy the trained model.\n\nScript mode supports training with a Python script, a Python module, or a shell script. In this example, we use a Python script to train a classification model on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). In this example, we will show how easily you can train a SageMaker using PyTorch scripts with SageMaker Python SDK. In addition, this notebook demonstrates how to perform real time inference with the [SageMaker PyTorch Serving container](https://github.com/aws/sagemaker-pytorch-serving-container). The PyTorch Serving container is the default inference method for script mode. For full documentation on deploying PyTorch models, please visit [here](https://github.com/aws/sagemaker-python-sdk/blob/master/doc/using_pytorch.rst#deploy-pytorch-models).",
"_____no_output_____"
],
[
"## Contents\n\n1. [Background](#Background)\n1. [Setup](#Setup)\n1. [Data](#Data)\n1. [Train](#Train)\n1. [Host](#Host)\n\n---\n\n## Background\n\nMNIST is a widely used dataset for handwritten digit classification. It consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). This tutorial will show how to train and test an MNIST model on SageMaker using PyTorch.\n\nFor more information about the PyTorch in SageMaker, please visit [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers) and [sagemaker-python-sdk](https://github.com/aws/sagemaker-python-sdk) github repositories.\n\n---\n\n## Setup\n\n_This notebook was created and tested on an ml.m4.xlarge notebook instance._",
"_____no_output_____"
],
[
"### Install SageMaker Python SDK",
"_____no_output_____"
]
],
[
[
"!pip install sagemaker --upgrade --ignore-installed --no-cache --user",
"_____no_output_____"
],
[
"!pip install torch==1.3.1 torchvision==0.4.2 --upgrade --ignore-installed --no-cache --user",
"_____no_output_____"
]
],
[
[
"Forcing `pillow==6.2.1` due to https://discuss.pytorch.org/t/cannot-import-name-pillow-version-from-pil/66096",
"_____no_output_____"
]
],
[
[
"!pip uninstall -y pillow",
"_____no_output_____"
],
[
"!pip install pillow==6.2.1 --upgrade --ignore-installed --no-cache --user",
"_____no_output_____"
]
],
[
[
"### Restart the Kernel to Recognize New Dependencies Above",
"_____no_output_____"
]
],
[
[
"from IPython.display import display_html\ndisplay_html(\"<script>Jupyter.notebook.kernel.restart()</script>\", raw=True)",
"_____no_output_____"
],
[
"!pip3 list",
"_____no_output_____"
]
],
[
[
"## Create the SageMaker Session",
"_____no_output_____"
]
],
[
[
"import os\nimport sagemaker\nfrom sagemaker import get_execution_role\n\nsagemaker_session = sagemaker.Session()",
"_____no_output_____"
]
],
[
[
"## Setup the Service Execution Role and Region\nGet IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the `sagemaker.get_execution_role()` with a the appropriate full IAM role arn string(s).",
"_____no_output_____"
]
],
[
[
"role = get_execution_role()\nprint('RoleARN: {}\\n'.format(role))\n\nregion = sagemaker_session.boto_session.region_name\nprint('Region: {}'.format(region))",
"_____no_output_____"
]
],
[
[
"## Training Data",
"_____no_output_____"
],
[
"### Copy the Training Data to Your Notebook Disk",
"_____no_output_____"
]
],
[
[
"local_data_path = './data'",
"_____no_output_____"
],
[
"from torchvision import datasets, transforms\n\nnormalization_mean = 0.1307\nnormalization_std = 0.3081\n\n# download the dataset\n# this will not only download data to ./mnist folder, but also load and transform (normalize) them\ndatasets.MNIST(local_data_path, download=True, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((normalization_mean,), (normalization_std,))\n]))",
"_____no_output_____"
],
[
"!ls -R {local_data_path}",
"_____no_output_____"
]
],
[
[
"### Upload the Data to S3 for Distributed Training Across Many Workers\nWe are going to use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use later when we start the training job.\n\nThis is S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.",
"_____no_output_____"
]
],
[
[
"bucket = sagemaker_session.default_bucket()\ndata_prefix = 'sagemaker/pytorch-mnist/data'",
"_____no_output_____"
],
[
"training_data_uri = sagemaker_session.upload_data(path=local_data_path, bucket=bucket, key_prefix=data_prefix)\nprint('Input spec (S3 path): {}'.format(training_data_uri))",
"_____no_output_____"
],
[
"!aws s3 ls --recursive {training_data_uri}",
"_____no_output_____"
]
],
[
[
"## Train\n### Training Script\nThe `mnist_pytorch.py` script provides all the code we need for training and hosting a SageMaker model (`model_fn` function to load a model).\nThe training script is very similar to a training script you might run outside of SageMaker, but you can access useful properties about the training environment through various environment variables, such as:\n\n* `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to.\n These artifacts are uploaded to S3 for model hosting.\n* `SM_NUM_GPUS`: The number of gpus available in the current container.\n* `SM_CURRENT_HOST`: The name of the current container on the container network.\n* `SM_HOSTS`: JSON encoded list containing all the hosts .\n\nSupposing one input channel, 'training', was used in the call to the PyTorch estimator's `fit()` method, the following will be set, following the format `SM_CHANNEL_[channel_name]`:\n\n* `SM_CHANNEL_TRAINING`: A string representing the path to the directory containing data in the 'training' channel.\n\nFor more information about training environment variables, please visit [SageMaker Containers](https://github.com/aws/sagemaker-containers).\n\nA typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an `argparse.ArgumentParser` instance.\n\nBecause the SageMaker imports the training script, you should put your training code in a main guard (``if __name__=='__main__':``) if you are using the same script to host your model as we do in this example, so that SageMaker does not inadvertently run your training code at the wrong point in execution.\n\nFor example, the script run by this notebook:",
"_____no_output_____"
]
],
[
[
"!ls ./src/mnist_pytorch.py",
"_____no_output_____"
]
],
[
[
"You can add custom Python modules to the `src/requirements.txt` file. They will automatically be installed - and made available to your training script.",
"_____no_output_____"
]
],
[
[
"!cat ./src/requirements.txt",
"_____no_output_____"
]
],
[
[
"### Train with SageMaker `PyTorch` Estimator\n\nThe `PyTorch` class allows us to run our training function as a training job on SageMaker infrastructure. We need to configure it with our training script, an IAM role, the number of training instances, the training instance type, and hyperparameters. In this case we are going to run our training job on two(2) `ml.p3.2xlarge` instances. Alternatively, you can specify `ml.c4.xlarge` instances. This example can be ran on one or multiple, cpu or gpu instances ([full list of available instances](https://aws.amazon.com/sagemaker/pricing/instance-types/)). The hyperparameters parameter is a dict of values that will be passed to your training script -- you can see how to access these values in the `mnist.py` script above.",
"_____no_output_____"
],
[
"After we've constructed our `PyTorch` object, we can fit it using the data we uploaded to S3. SageMaker makes sure our data is available in the local filesystem of each worker, so our training script can simply read the data from disk.",
"_____no_output_____"
],
[
"### `fit` the Model (Approx. 15 mins)\n\nTo start a training job, we call `estimator.fit(training_data_uri)`.",
"_____no_output_____"
]
],
[
[
"from sagemaker.pytorch import PyTorch\nimport time\n\nmodel_output_path = 's3://{}/sagemaker/pytorch-mnist/training-runs'.format(bucket)\n\nmnist_estimator = PyTorch(\n entry_point='mnist_pytorch.py',\n source_dir='./src',\n output_path=model_output_path,\n role=role,\n framework_version='1.3.1',\n train_instance_count=1,\n train_instance_type='ml.c5.2xlarge',\n enable_sagemaker_metrics=True,\n hyperparameters={\n 'epochs': 5,\n 'backend': 'gloo'\n },\n # Assuming the logline from the PyTorch training job is as follows:\n # Test set: Average loss: 0.3230, Accuracy: 9103/10000 (91%)\n metric_definitions=[\n {'Name':'test:loss', 'Regex':'Test set: Average loss: (.*?),'},\n {'Name':'test:accuracy', 'Regex':'(.*?)%;'}\n ]\n)\n\nmnist_estimator.fit(inputs={'training': training_data_uri},\n wait=False)\n\ntraining_job_name = mnist_estimator.latest_training_job.name\n\nprint('training_job_name: {}'.format(training_job_name))",
"_____no_output_____"
]
],
[
[
"Attach to a training job to monitor the logs.\n\n_Note: Each instance in the training job (2 in this example) will appear as a different color in the logs. 1 color per instance._",
"_____no_output_____"
]
],
[
[
"mnist_estimator = PyTorch.attach(training_job_name=training_job_name)",
"_____no_output_____"
]
],
[
[
"## Option 1: Perform Batch Predictions Directly in the Notebook",
"_____no_output_____"
],
[
"Use PyTorch Core to load the model from `model_output_path`",
"_____no_output_____"
]
],
[
[
"!aws --region {region} s3 ls --recursive {model_output_path}/{training_job_name}/output/",
"_____no_output_____"
],
[
"!aws --region {region} s3 cp {model_output_path}/{training_job_name}/output/model.tar.gz ./model/model.tar.gz",
"_____no_output_____"
],
[
"!ls ./model",
"_____no_output_____"
],
[
"!tar -xzvf ./model/model.tar.gz -C ./model",
"_____no_output_____"
],
[
"# Based on https://github.com/pytorch/examples/blob/master/mnist/main.py\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)",
"_____no_output_____"
],
[
"import torch\n\nloaded_model = Net().to('cpu')\n# single-machine multi-gpu case or single-machine or multi-machine cpu case\nloaded_model = torch.nn.DataParallel(loaded_model)\nprint(loaded_model)",
"_____no_output_____"
],
[
"loaded_model.load_state_dict(torch.load('./model/model.pth', map_location='cpu'))",
"_____no_output_____"
],
[
"test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('./data', train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=256, \n shuffle=True\n)\n\nsingle_loaded_img = test_loader.dataset.data[0]\nsingle_loaded_img = single_loaded_img.to('cpu')\nsingle_loaded_img = single_loaded_img[None, None]\nsingle_loaded_img = single_loaded_img.type('torch.FloatTensor') # instead of DoubleTensor\n\nprint(single_loaded_img.numpy())",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\n\nplt.imshow(single_loaded_img.numpy().reshape(28, 28), cmap='Greys')",
"_____no_output_____"
],
[
"result = loaded_model(single_loaded_img)\nprediction = result.max(1, keepdim=True)[1][0][0].numpy()\nprint(prediction)",
"_____no_output_____"
]
],
[
[
"## Option 2: Create a SageMaker Endpoint and Perform REST-based Predictions",
"_____no_output_____"
],
[
"### Deploy the Trained Model to a SageMaker Endpoint (Approx. 10 mins)\n\nAfter training, we use the `PyTorch` estimator object to build and deploy a `PyTorchPredictor`. This creates a Sagemaker Endpoint -- a hosted prediction service that we can use to perform inference.\n\nAs mentioned above we have implementation of `model_fn` in the `pytorch_mnist.py` script that is required. We are going to use default implementations of `input_fn`, `predict_fn`, `output_fn` and `transform_fm` defined in [sagemaker-pytorch-containers](https://github.com/aws/sagemaker-pytorch-containers).\n\nThe arguments to the deploy function allow us to set the number and type of instances that will be used for the Endpoint. These do not need to be the same as the values we used for the training job. For example, you can train a model on a set of GPU-based instances, and then deploy the Endpoint to a fleet of CPU-based instances, but you need to make sure that you return or save your model as a cpu model similar to what we did in `mnist.py`.",
"_____no_output_____"
]
],
[
[
"predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type='ml.c5.2xlarge')",
"_____no_output_____"
]
],
[
[
"### Invoke the Endpoint\n\nWe can now use this predictor to classify hand-written digits. Drawing into the image box loads the pixel data into a `data` variable in this notebook, which we can then pass to the `predictor`.",
"_____no_output_____"
]
],
[
[
"from IPython.display import HTML\nHTML(open(\"input.html\").read())",
"_____no_output_____"
]
],
[
[
"The value of `data` is retrieved from the HTML above.",
"_____no_output_____"
]
],
[
[
"print(data)",
"_____no_output_____"
],
[
"import numpy as np\n\nimage = np.array([data], dtype=np.float32)\nresponse = predictor.predict(image)\nprediction = response.argmax(axis=1)[0]\nprint(prediction)",
"_____no_output_____"
]
],
[
[
"### (Optional) Cleanup Endpoint\n\nAfter you have finished with this example, remember to delete the prediction endpoint to release the instance(s) associated with it",
"_____no_output_____"
]
],
[
[
"sagemaker.Session().delete_endpoint(predictor.endpoint)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79a525454968b9e8d635d84400bb323ceb521b0 | 111,953 | ipynb | Jupyter Notebook | ml-model/notebooks/Train Plane Detection Model.ipynb | wiseman/SkyScan | d40b4fca4b7d8a06b3bcf96da0f1a8c348f7460f | [
"Apache-2.0"
] | 1 | 2022-02-22T08:08:39.000Z | 2022-02-22T08:08:39.000Z | ml-model/notebooks/Train Plane Detection Model.ipynb | EHSJ/SkyScan | 42885b6217c154afa94c8372ed6164437929c5f5 | [
"Apache-2.0"
] | null | null | null | ml-model/notebooks/Train Plane Detection Model.ipynb | EHSJ/SkyScan | 42885b6217c154afa94c8372ed6164437929c5f5 | [
"Apache-2.0"
] | null | null | null | 58.036807 | 1,837 | 0.638545 | [
[
[
"# Train a Plane Detection Model from Voxel51 Dataset\nThis notebook trains a plane detection model using transfer learning. \nDepending on the label used, it can just detect a plane or it can try to detect the model of the plane.\nA pre-trained model is used as a starting point. This means that fewer example images are needed and the training process is faster.\n\nImages are exported from a Voxel51 Dataset into TensorFlow Records.The examples in the TFRecord are based on a selected Field from the Samples in the Voxel51 dataset. The V51 Sample field you choose should have 1 or more \"detections\", which are bounding boxes with a label.\n\nFrom: https://colab.research.google.com/drive/1sLqFKVV94wm-lglFq_0kGo2ciM0kecWD#scrollTo=wHfsJ5nWLWh9&uniqifier=1\n\nGood stuff here too: https://www.inovex.de/blog/deep-learning-mobile-tensorflow-lite/ ",
"_____no_output_____"
],
[
"## Configure the Training",
"_____no_output_____"
]
],
[
[
"training_name=\"881images-efficientdet-d0-model\" # The name for the model. All of the different directories will be based on this\nlabel_field = \"detections\" # The field from the V51 Samples around which will be used for the Labels for training.\ndataset_name = \"jsm-test-dataset\" # The name of the V51 dataset that will be used\n\n\n# Available Model Configs (You can add more from the TF2 Model Zoo)\nMODELS_CONFIG = {\n 'ssd_mobilenet_v2': {\n 'model_name': 'ssd_mobilenet_v2_320x320_coco17_tpu-8',\n 'base_pipeline_file': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.config',\n 'pretrained_checkpoint': 'ssd_mobilenet_v2_320x320_coco17_tpu-8.tar.gz',\n 'batch_size': 24\n },\n 'ssd_mobilenet_v2_fpnlite': {\n 'model_name': 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8',\n 'base_pipeline_file': 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.config',\n 'pretrained_checkpoint': 'ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.tar.gz',\n 'batch_size': 18\n },\n 'efficientdet-d0': {\n 'model_name': 'efficientdet_d0_coco17_tpu-32',\n 'base_pipeline_file': 'ssd_efficientdet_d0_512x512_coco17_tpu-8.config',\n 'pretrained_checkpoint': 'efficientdet_d0_coco17_tpu-32.tar.gz',\n 'batch_size': 18\n },\n 'efficientdet-d1': {\n 'model_name': 'efficientdet_d1_coco17_tpu-32',\n 'base_pipeline_file': 'ssd_efficientdet_d1_640x640_coco17_tpu-8.config',\n 'pretrained_checkpoint': 'efficientdet_d1_coco17_tpu-32.tar.gz',\n 'batch_size': 18\n },\n 'efficientdet-d2': {\n 'model_name': 'efficientdet_d2_coco17_tpu-32',\n 'base_pipeline_file': 'ssd_efficientdet_d2_768x768_coco17_tpu-8.config',\n 'pretrained_checkpoint': 'efficientdet_d2_coco17_tpu-32.tar.gz',\n 'batch_size': 18\n },\n 'efficientdet-d3': {\n 'model_name': 'efficientdet_d3_coco17_tpu-32',\n 'base_pipeline_file': 'ssd_efficientdet_d3_896x896_coco17_tpu-32.config',\n 'pretrained_checkpoint': 'efficientdet_d3_coco17_tpu-32.tar.gz',\n 'batch_size': 18\n }\n}\n\n# change chosen model to deploy different models \nchosen_model = 
'efficientdet-d0' #'ssd_mobilenet_v2'\n\nnum_steps = 40000 # The more steps, the longer the training. Increase if your loss function is still decreasing and validation metrics are increasing. \nnum_eval_steps = 500 # Perform evaluation after so many steps\n\n",
"_____no_output_____"
],
[
"# The different directories and filenames to use\ntrain_record_fname = \"/tf/dataset-export/\" + training_name + \"/train/tf.records\"\nval_record_fname = \"/tf/dataset-export/\" + training_name + \"/val/tf.records\"\nval_export_dir = \"/tf/dataset-export/\" + training_name + \"/val/\"\ntrain_export_dir = \"/tf/dataset-export/\" + training_name + \"/train/\"\nmodel_export_dir = \"/tf/model-export/\" + training_name +\"/\"\n\nlabel_map_file = \"/tf/dataset-export/\" + training_name + \"/label_map.pbtxt\"\n\nmodel_name = MODELS_CONFIG[chosen_model]['model_name']\npretrained_checkpoint = MODELS_CONFIG[chosen_model]['pretrained_checkpoint']\nbase_pipeline_file = MODELS_CONFIG[chosen_model]['base_pipeline_file']\nbatch_size = MODELS_CONFIG[chosen_model]['batch_size'] #if you can fit a large batch in memory, it may speed up your training\n\npipeline_fname = '/tf/models/research/deploy/' + base_pipeline_file\nfine_tune_checkpoint = '/tf/models/research/deploy/' + model_name + '/checkpoint/ckpt-0'\npipeline_file = '/tf/models/research/deploy/pipeline_file.config'\nmodel_dir = '/tf/training/'+training_name+'/'",
"_____no_output_____"
],
[
"# Install the different packages needed\n#! apt install -y protobuf-compiler libgl1-mesa-glx wget",
"Reading package lists... Done\nBuilding dependency tree \nReading state information... Done\nprotobuf-compiler is already the newest version (3.0.0-9.1ubuntu1).\nlibgl1-mesa-glx is already the newest version (20.0.8-0ubuntu1~18.04.1).\nwget is already the newest version (1.19.4-1ubuntu2.2).\n0 upgraded, 0 newly installed, 0 to remove and 21 not upgraded.\n"
]
],
[
[
"## Download and Install TF Models\nThe TF Object Detection API is available here: https://github.com/tensorflow/models",
"_____no_output_____"
]
],
[
[
"import os\nimport pathlib\n\n# Clone the tensorflow models repository if it doesn't already exist\nif \"models\" in pathlib.Path.cwd().parts:\n while \"models\" in pathlib.Path.cwd().parts:\n os.chdir('..')\nelif not pathlib.Path('models').exists():\n # pull v2.5.0 of tensorflow models to make deterministic \n !git clone --depth 1 https://github.com/tensorflow/models/tree/v2.5.0 /tf/models",
"fatal: destination path '/tf/models' already exists and is not an empty directory.\r\n"
],
[
"%%bash\ncd /tf/models/research\nls\nprotoc object_detection/protos/*.proto --python_out=.\ncp object_detection/packages/tf2/setup.py .\npython -m pip install .",
"README.md\nadversarial_text\nattention_ocr\naudioset\nautoaugment\ncognitive_planning\ncvt_text\ndeep_speech\ndeeplab\ndelf\ndeploy\nefficient-hrl\nlfads\nlstm_object_detection\nmarco\nnst_blogpost\nobject_detection\npcl_rl\nrebar\nseq_flow_lite\nsetup.py\nslim\nvid2depth\nProcessing /tf/models/research\nRequirement already satisfied: avro-python3 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (1.10.2)\nRequirement already satisfied: apache-beam in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (2.30.0)\nRequirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (8.2.0)\nRequirement already satisfied: lxml in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (4.6.3)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (3.3.4)\nRequirement already satisfied: Cython in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (0.29.23)\nRequirement already satisfied: contextlib2 in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (0.6.0.post1)\nRequirement already satisfied: tf-slim in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (1.1.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (1.15.0)\nRequirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (2.0.2)\nRequirement already satisfied: lvis in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (0.5.3)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (1.5.4)\nRequirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) (1.1.5)\nRequirement already satisfied: tf-models-official in /usr/local/lib/python3.6/dist-packages (from object-detection==0.1) 
(2.5.0)\nRequirement already satisfied: fastavro<2,>=0.21.4 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (1.4.1)\nRequirement already satisfied: dill<0.3.2,>=0.3.1.1 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (0.3.1.1)\nRequirement already satisfied: pymongo<4.0.0,>=3.8.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (3.11.4)\nRequirement already satisfied: requests<3.0.0,>=2.24.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (2.25.1)\nRequirement already satisfied: typing-extensions<3.8.0,>=3.7.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (3.7.4.3)\nRequirement already satisfied: numpy<1.21.0,>=1.14.3 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (1.19.5)\nRequirement already satisfied: protobuf<4,>=3.12.2 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (3.17.0)\nRequirement already satisfied: dataclasses; python_version < \"3.7\" in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (0.8)\nRequirement already satisfied: crcmod<2.0,>=1.7 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (1.7)\nRequirement already satisfied: pytz>=2018.3 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (2021.1)\nRequirement already satisfied: grpcio<2,>=1.29.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (1.34.1)\nRequirement already satisfied: hdfs<3.0.0,>=2.1.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (2.6.0)\nRequirement already satisfied: oauth2client<5,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (4.1.3)\nRequirement already satisfied: pydot<2,>=1.2.0 in 
/usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (1.4.2)\nRequirement already satisfied: pyarrow<4.0.0,>=0.15.1 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (3.0.0)\nRequirement already satisfied: python-dateutil<3,>=2.8.0 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (2.8.1)\nRequirement already satisfied: httplib2<0.20.0,>=0.8 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (0.19.1)\nRequirement already satisfied: future<1.0.0,>=0.18.2 in /usr/local/lib/python3.6/dist-packages (from apache-beam->object-detection==0.1) (0.18.2)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /usr/local/lib/python3.6/dist-packages (from matplotlib->object-detection==0.1) (2.4.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->object-detection==0.1) (0.10.0)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->object-detection==0.1) (1.3.1)\nRequirement already satisfied: absl-py>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from tf-slim->object-detection==0.1) (0.12.0)\nRequirement already satisfied: setuptools>=18.0 in /usr/local/lib/python3.6/dist-packages (from pycocotools->object-detection==0.1) (56.2.0)\nRequirement already satisfied: opencv-python>=4.1.0.25 in /usr/local/lib/python3.6/dist-packages (from lvis->object-detection==0.1) (4.5.2.54)\nRequirement already satisfied: sacrebleu in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (1.5.1)\nRequirement already satisfied: google-api-python-client>=1.6.7 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (2.10.0)\nRequirement already satisfied: kaggle>=1.3.9 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) 
(1.5.12)\nRequirement already satisfied: py-cpuinfo>=3.3.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (8.0.0)\nRequirement already satisfied: gin-config in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (0.4.0)\nRequirement already satisfied: tensorflow-datasets in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (4.3.0)\nRequirement already satisfied: sentencepiece in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (0.1.96)\nRequirement already satisfied: psutil>=5.4.3 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (5.8.0)\nRequirement already satisfied: tensorflow>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (2.5.0)\nRequirement already satisfied: opencv-python-headless in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (4.4.0.46)\nRequirement already satisfied: tensorflow-hub>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (0.12.0)\nRequirement already satisfied: google-cloud-bigquery>=0.31.0 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (2.20.0)\nRequirement already satisfied: seqeval in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (1.2.2)\nRequirement already satisfied: tensorflow-model-optimization>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (0.6.0)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (5.4.1)\nRequirement already satisfied: tensorflow-addons in /usr/local/lib/python3.6/dist-packages (from tf-models-official->object-detection==0.1) (0.13.0)\nRequirement already satisfied: 
chardet<5,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (4.0.0)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2020.12.5)\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (1.26.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/lib/python3/dist-packages (from requests<3.0.0,>=2.24.0->apache-beam->object-detection==0.1) (2.6)\nRequirement already satisfied: docopt in /usr/local/lib/python3.6/dist-packages (from hdfs<3.0.0,>=2.1.0->apache-beam->object-detection==0.1) (0.6.2)\nRequirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (4.7.2)\nRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (0.4.8)\nRequirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5,>=2.0.1->apache-beam->object-detection==0.1) (0.2.8)\nRequirement already satisfied: portalocker==2.0.0 in /usr/local/lib/python3.6/dist-packages (from sacrebleu->tf-models-official->object-detection==0.1) (2.0.0)\nRequirement already satisfied: google-api-core<2dev,>=1.21.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.30.0)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (3.0.1)\nRequirement already satisfied: google-auth-httplib2>=0.1.0 in /usr/local/lib/python3.6/dist-packages (from 
google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (0.1.0)\nRequirement already satisfied: google-auth<2dev,>=1.16.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.30.0)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-official->object-detection==0.1) (4.61.1)\nRequirement already satisfied: python-slugify in /usr/local/lib/python3.6/dist-packages (from kaggle>=1.3.9->tf-models-official->object-detection==0.1) (5.0.2)\nRequirement already satisfied: termcolor in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (1.1.0)\nRequirement already satisfied: tensorflow-metadata in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (1.1.0)\nRequirement already satisfied: attrs>=18.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (21.2.0)\nRequirement already satisfied: importlib-resources; python_version < \"3.9\" in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (5.1.4)\nRequirement already satisfied: promise in /usr/local/lib/python3.6/dist-packages (from tensorflow-datasets->tf-models-official->object-detection==0.1) (2.3)\nRequirement already satisfied: tensorboard~=2.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (2.5.0)\nRequirement already satisfied: tensorflow-estimator<2.6.0,>=2.5.0rc0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (2.5.0rc0)\nRequirement already satisfied: keras-preprocessing~=1.1.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.1.2)\nRequirement 
already satisfied: h5py~=3.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (3.1.0)\nRequirement already satisfied: google-pasta~=0.2 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (0.2.0)\nRequirement already satisfied: flatbuffers~=1.12.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.12)\nRequirement already satisfied: wheel~=0.35 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (0.36.2)\nRequirement already satisfied: opt-einsum~=3.3.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (3.3.0)\nRequirement already satisfied: wrapt~=1.12.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.12.1)\nRequirement already satisfied: gast==0.4.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (0.4.0)\nRequirement already satisfied: astunparse~=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.6.3)\nRequirement already satisfied: keras-nightly~=2.5.0.dev in /usr/local/lib/python3.6/dist-packages (from tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (2.5.0.dev2021032900)\nRequirement already satisfied: google-resumable-media<2.0dev,>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.3.1)\nRequirement already satisfied: proto-plus>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.18.1)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.6/dist-packages (from 
google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (20.9)\nRequirement already satisfied: google-cloud-core<2.0dev,>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.7.1)\nRequirement already satisfied: scikit-learn>=0.21.3 in /usr/local/lib/python3.6/dist-packages (from seqeval->tf-models-official->object-detection==0.1) (0.24.2)\nRequirement already satisfied: dm-tree~=0.1.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow-model-optimization>=0.4.1->tf-models-official->object-detection==0.1) (0.1.6)\nRequirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.6/dist-packages (from tensorflow-addons->tf-models-official->object-detection==0.1) (2.12.1)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from google-api-core<2dev,>=1.21.0->google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (1.53.0)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2dev,>=1.16.0->google-api-python-client>=1.6.7->tf-models-official->object-detection==0.1) (4.2.2)\nRequirement already satisfied: text-unidecode>=1.3 in /usr/local/lib/python3.6/dist-packages (from python-slugify->kaggle>=1.3.9->tf-models-official->object-detection==0.1) (1.3)\nRequirement already satisfied: zipp>=3.1.0; python_version < \"3.10\" in /usr/local/lib/python3.6/dist-packages (from importlib-resources; python_version < \"3.9\"->tensorflow-datasets->tf-models-official->object-detection==0.1) (3.4.1)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.8.0)\nRequirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from 
tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (0.6.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (2.0.0)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (3.3.4)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (0.4.4)\nRequirement already satisfied: cached-property; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from h5py~=3.1.0->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.5.2)\nRequirement already satisfied: google-crc32c<2.0dev,>=1.0; python_version >= \"3.5\" in /usr/local/lib/python3.6/dist-packages (from google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.1.2)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official->object-detection==0.1) (1.0.1)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-learn>=0.21.3->seqeval->tf-models-official->object-detection==0.1) (2.1.0)\nRequirement already satisfied: importlib-metadata; python_version < \"3.8\" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (4.0.1)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (1.3.0)\nRequirement already satisfied: cffi>=1.0.0 in 
/usr/local/lib/python3.6/dist-packages (from google-crc32c<2.0dev,>=1.0; python_version >= \"3.5\"->google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (1.14.5)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard~=2.5->tensorflow>=2.5.0->tf-models-official->object-detection==0.1) (3.1.0)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.6/dist-packages (from cffi>=1.0.0->google-crc32c<2.0dev,>=1.0; python_version >= \"3.5\"->google-resumable-media<2.0dev,>=0.6.0->google-cloud-bigquery>=0.31.0->tf-models-official->object-detection==0.1) (2.20)\nBuilding wheels for collected packages: object-detection\n Building wheel for object-detection (setup.py): started\n Building wheel for object-detection (setup.py): finished with status 'done'\n Created wheel for object-detection: filename=object_detection-0.1-py3-none-any.whl size=1650080 sha256=dcfc104b19e566c43faabaae7862c4923479cfe3849ae11e9717fb1fb9aabeee\n Stored in directory: /tmp/pip-ephem-wheel-cache-qv3hkweu/wheels/1b/00/50/d3675d90b11a88efdd99ed80b60a2b19e5769a0bb333440375\nSuccessfully built object-detection\nInstalling collected packages: object-detection\n Attempting uninstall: object-detection\n Found existing installation: object-detection 0.1\n Uninstalling object-detection-0.1:\n Successfully uninstalled object-detection-0.1\nSuccessfully installed object-detection-0.1\n"
],
[
"import matplotlib\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport io\nimport imageio\nimport scipy.misc\nimport numpy as np\nfrom six import BytesIO\nfrom PIL import Image, ImageDraw, ImageFont\nfrom IPython.display import display, Javascript\nfrom IPython.display import Image as IPyImage\n\nimport tensorflow as tf\nfrom object_detection.protos.string_int_label_map_pb2 import StringIntLabelMap, StringIntLabelMapItem\nfrom google.protobuf import text_format\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import config_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Export the Training and Val Dataset from Voxel 51",
"_____no_output_____"
]
],
[
[
"import fiftyone as fo\nimport math\ndataset = fo.load_dataset(dataset_name)\n",
"_____no_output_____"
]
],
[
[
"### Explore the dataset content\nHere are some basic stats on the Voxel51 dataset you are going to be training the model on. \nAn example of the samples is also printed out. In the Sample, make sure the *label_field* you selected has some detections in it.",
"_____no_output_____"
]
],
[
[
"print(\"\\t\\tDataset\\n-----------------------------------\")\nview = dataset.match_tags(\"training\").shuffle(seed=51) # You can add additional things to the query to further refine it. eg .match_tags(\"good_box\")\nprint(view)\nprint(\"\\n\\n\\tExample Sample\\n-----------------------------------\")\nprint(view.first())\n",
"\t\tDataset\n-----------------------------------\nDataset: jsm-test-dataset\nMedia type: image\nNum samples: 881\nTags: ['capture-3-29', 'capture-3-30', 'capture-5-13', 'training']\nSample fields:\n id: fiftyone.core.fields.ObjectIdField\n filepath: fiftyone.core.fields.StringField\n tags: fiftyone.core.fields.ListField(fiftyone.core.fields.StringField)\n metadata: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.metadata.Metadata)\n external_id: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n bearing: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n elevation: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n distance: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n icao24: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n model: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n manufacturer: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n norm_model: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n labelbox_id: fiftyone.core.fields.StringField\n detections: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n operatorcallsign: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\n predict_model: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n dolt_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n dolt_40k_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n dolt_bg_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n dolt_400_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n 400_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n 
400_aug_5k_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n 914_mega_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n 914_40k_predict: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n 914_40k_predict_full: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\n eval_tp: fiftyone.core.fields.IntField\n eval_fp: fiftyone.core.fields.IntField\n eval_fn: fiftyone.core.fields.IntField\nView stages:\n 1. MatchTags(tags=['training'], bool=True)\n 2. Shuffle(seed=51)\n\n\n\tExample Sample\n-----------------------------------\n<SampleView: {\n 'id': '60a3bf2ef3610f2b7a828f88',\n 'media_type': 'image',\n 'filepath': '/tf/media/capture-5-13/Airbus Industrie A321-211/a11eb7_275_77_9303_2021-05-13-11-17-33.jpg',\n 'tags': BaseList(['training', 'capture-5-13']),\n 'metadata': <ImageMetadata: {\n 'size_bytes': 489161,\n 'mime_type': 'image/jpeg',\n 'width': 1920,\n 'height': 1080,\n 'num_channels': 3,\n }>,\n 'external_id': <Classification: {\n 'id': '60a3bf2ef3610f2b7a828f83',\n 'tags': BaseList([]),\n 'label': 'a11eb7_275_77_9303_2021-05-13-11-17-33',\n 'confidence': None,\n 'logits': None,\n }>,\n 'bearing': <Classification: {\n 'id': '60a3bf2ef3610f2b7a828f84',\n 'tags': BaseList([]),\n 'label': '275',\n 'confidence': None,\n 'logits': None,\n }>,\n 'elevation': <Classification: {\n 'id': '60a3bf2ef3610f2b7a828f85',\n 'tags': BaseList([]),\n 'label': '77',\n 'confidence': None,\n 'logits': None,\n }>,\n 'distance': <Classification: {\n 'id': '60a3bf2ef3610f2b7a828f86',\n 'tags': BaseList([]),\n 'label': '9303',\n 'confidence': None,\n 'logits': None,\n }>,\n 'icao24': <Classification: {\n 'id': '60a3bf2ef3610f2b7a828f87',\n 'tags': BaseList([]),\n 'label': 'a11eb7',\n 'confidence': None,\n 'logits': None,\n }>,\n 'model': <Classification: {\n 'id': '60d5e7045e08d80243cba70b',\n 'tags': BaseList([]),\n 'label': 'A321-211',\n 'confidence': None,\n 
'logits': None,\n }>,\n 'manufacturer': <Classification: {\n 'id': '60d5e7045e08d80243cba70c',\n 'tags': BaseList([]),\n 'label': 'Airbus Industrie',\n 'confidence': None,\n 'logits': None,\n }>,\n 'norm_model': <Classification: {\n 'id': '60d5e7ba5e08d80243ce5c32',\n 'tags': BaseList([]),\n 'label': 'A321',\n 'confidence': None,\n 'logits': None,\n }>,\n 'labelbox_id': 'ckou3l1fk9ns80y99bzz3fusq',\n 'detections': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60e702f215f87e1a607696c7',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5755208333333334,\n 0.40185185185185185,\n 0.121875,\n 0.16296296296296298,\n ]),\n 'mask': None,\n 'confidence': None,\n 'index': None,\n }>,\n ]),\n }>,\n 'operatorcallsign': <Classification: {\n 'id': '60d5e7045e08d80243cba70d',\n 'tags': BaseList([]),\n 'label': 'AMERICAN',\n 'confidence': None,\n 'logits': None,\n }>,\n 'predict_model': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60d0ccaf218c23e19b1f9e85',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5768705606460571,\n 0.4023531973361969,\n 0.12218141555786133,\n 0.16684052348136902,\n ]),\n 'mask': None,\n 'confidence': 0.9999737739562988,\n 'index': None,\n }>,\n ]),\n }>,\n 'dolt_predict': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60d35f37218c23e19b1fb5d0',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5762593746185303,\n 0.40124379263983834,\n 0.12189579010009766,\n 0.16604603661431205,\n ]),\n 'mask': None,\n 'confidence': 0.9999697208404541,\n 'index': None,\n }>,\n ]),\n }>,\n 'dolt_40k_predict': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60d543ac20cf8e383b417810',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5774428248405457,\n 
0.3997461001078288,\n 0.11862397193908691,\n 0.1671259138319227,\n ]),\n 'mask': None,\n 'confidence': 1.0,\n 'index': None,\n }>,\n ]),\n }>,\n 'dolt_bg_predict': None,\n 'dolt_400_predict': None,\n '400_predict': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60d7ee853f3353cca1d7c600',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5755680799484253,\n 0.4013982084062364,\n 0.12205994129180908,\n 0.16460511419508195,\n ]),\n 'mask': None,\n 'confidence': 1.0,\n 'index': None,\n }>,\n ]),\n }>,\n '400_aug_5k_predict': None,\n '914_mega_predict': None,\n '914_40k_predict': <Detections: {\n 'detections': BaseList([\n <Detection: {\n 'id': '60e4805010424668fddefc67',\n 'attributes': BaseDict({}),\n 'tags': BaseList([]),\n 'label': 'plane',\n 'bounding_box': BaseList([\n 0.5735075355817875,\n 0.40489327907562256,\n 0.12403148040175438,\n 0.1557837724685669,\n ]),\n 'mask': None,\n 'confidence': 0.9985151886940002,\n 'index': None,\n }>,\n ]),\n }>,\n '914_40k_predict_full': None,\n 'eval_tp': None,\n 'eval_fp': None,\n 'eval_fn': None,\n}>\n"
]
],
[
[
"### Export the dataset into TFRecords\nThe selected dataset samples will be exported to TensorFlow Records (TFRecords). They will be split between Training and Validation. The ratio can be adjusted below. You only need to do this once to build the dataset. If you run this a second time with the same **model_name** additional samples will be appended to the end.",
"_____no_output_____"
]
],
[
[
"# The Dataset or DatasetView to export\nsample_len = len(view)\nval_len = math.floor(sample_len * 0.2)\ntrain_len = math.floor(sample_len * 0.8)\nprint(\"Total: {} Val: {} Train: {}\".format(sample_len,val_len,train_len))\nval_view = view.take(val_len)\ntrain_view = view.skip(val_len).take(train_len)\n# Export the dataset\nval_view.export(\n export_dir=val_export_dir,\n dataset_type=fo.types.TFObjectDetectionDataset,#fo.types.COCODetectionDataset,#fo.types.TFObjectDetectionDataset,\n label_field=label_field,\n)\n\ntrain_view.export(\n export_dir=train_export_dir,\n dataset_type=fo.types.TFObjectDetectionDataset,#fo.types.COCODetectionDataset,#fo.types.TFObjectDetectionDataset,\n label_field=label_field,\n)",
"Total: 881 Val: 176 Train: 704\n 100% |█████████████████| 176/176 [4.1s elapsed, 0s remaining, 52.9 samples/s] \n 100% |█████████████████| 704/704 [13.2s elapsed, 0s remaining, 54.4 samples/s] \n"
]
],
[
[
"## Create a file with the Labels for the objects\nThe TF2 Object Detection API looks for a map of the labels used and a corresponding Id. You can build a list of the unique classnames by iterating the dataset. You can also just hardcode it if there are only a few.",
"_____no_output_____"
]
],
[
[
"def convert_classes(classes, start=1):\n msg = StringIntLabelMap()\n for id, name in enumerate(classes, start=start):\n msg.item.append(StringIntLabelMapItem(id=id, name=name))\n\n text = str(text_format.MessageToBytes(msg, as_utf8=True), 'utf-8')\n return text",
"_____no_output_____"
],
[
"# If labelfield is a classification\nclass_names=[]\nfor sample in view.select_fields(label_field):\n if sample[label_field].label not in class_names:\n class_names.append(sample[label_field].label)\nprint(class_names)",
"_____no_output_____"
],
[
"# If labelfield is detections\nclass_names=[]\nfor sample in view.select_fields(label_field):\n if sample[label_field] is not None:\n for detection in sample[label_field].detections: \n label = detection[\"label\"]\n if label not in class_names:\n class_names.append(label)\nprint(class_names)",
"['plane']\n"
],
[
"# You can hard wire it too\nclass_names=[\"plane\"]",
"_____no_output_____"
],
[
"\ntxt = convert_classes(class_names)\nprint(txt)\nwith open(label_map_file, 'w') as f:\n f.write(txt)",
"item {\n name: \"plane\"\n id: 1\n}\n\n"
]
],
[
[
"## Download a pretrained Model & default Config\nA list of the models can be found here: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf2_detection_zoo.md\n\nThe configs are here: https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/configs/tf2/",
"_____no_output_____"
]
],
[
[
"#download pretrained weights\n%mkdir /tf/models/research/deploy/\n%cd /tf/models/research/deploy/\nimport tarfile\ndownload_tar = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/' + pretrained_checkpoint\n\n!wget {download_tar}\ntar = tarfile.open(pretrained_checkpoint)\ntar.extractall()\ntar.close()",
"_____no_output_____"
],
[
"#download base training configuration file\n%cd /tf/models/research/deploy\ndownload_config = 'https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/configs/tf2/' + base_pipeline_file\n!wget {download_config}",
"/tf/models/research/deploy\n--2021-07-08 20:52:50-- https://raw.githubusercontent.com/tensorflow/models/master/research/object_detection/configs/tf2/ssd_efficientdet_d0_512x512_coco17_tpu-8.config\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.110.133, 185.199.111.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 4630 (4.5K) [text/plain]\nSaving to: ‘ssd_efficientdet_d0_512x512_coco17_tpu-8.config.8’\n\nssd_efficientdet_d0 100%[===================>] 4.52K --.-KB/s in 0s \n\n2021-07-08 20:52:50 (43.3 MB/s) - ‘ssd_efficientdet_d0_512x512_coco17_tpu-8.config.8’ saved [4630/4630]\n\n"
]
],
[
[
"## Build the Config for training\nThe default config for the model being trained needs to be updated with the correct parameters and paths to the data. This just adds some standard settings; you may need to do some additional tuning if the training is not working well.",
"_____no_output_____"
]
],
[
[
"# Gets the total number of classes from the Label Map\n\ndef get_num_classes(pbtxt_fname):\n from object_detection.utils import label_map_util\n label_map = label_map_util.load_labelmap(pbtxt_fname)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=90, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n return len(category_index.keys())\nnum_classes = get_num_classes(label_map_file)\nprint(\"working with {} classes\".format(num_classes))",
"working with 1 classes\n"
]
],
[
[
"You may need to adjust the learning rate section below. The numbers used here are from the EfficientDet config. I noticed that this learning rate worked well for the small bounding boxes I was using when planes were at a high altitude. You can try increasing it if the planes take up more of the image. If the initial loss rates are high (>0) that is probably a sign that you should adjust the Learning Rate.\n\nYou may also want to look at other aspects of the config file. They set the parameters for the model training and may need to be adjusted based on the Model Architecture you are using and the images you are training on.",
"_____no_output_____"
]
],
[
[
"# write custom configuration file by slotting our dataset, model checkpoint, and training parameters into the base pipeline file\n\nimport re\n\n%cd /tf/models/research/deploy\nprint('writing custom configuration file')\n\nwith open(pipeline_fname) as f:\n s = f.read()\nwith open('pipeline_file.config', 'w') as f:\n \n # fine_tune_checkpoint\n s = re.sub('fine_tune_checkpoint: \".*?\"',\n 'fine_tune_checkpoint: \"{}\"'.format(fine_tune_checkpoint), s)\n \n # tfrecord files train and test.\n s = re.sub(\n '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/train)(.*?\")', 'input_path: \"{}\"'.format(train_record_fname), s)\n s = re.sub(\n '(input_path: \".*?)(PATH_TO_BE_CONFIGURED/val)(.*?\")', 'input_path: \"{}\"'.format(val_record_fname), s)\n\n # label_map_path\n s = re.sub(\n 'label_map_path: \".*?\"', 'label_map_path: \"{}\"'.format(label_map_file), s)\n\n # Set training batch_size.\n s = re.sub('batch_size: [0-9]+',\n 'batch_size: {}'.format(batch_size), s)\n\n # Set training steps, num_steps\n s = re.sub('num_steps: [0-9]+',\n 'num_steps: {}'.format(num_steps), s)\n \n # Set learning_rate_base in learning_rate, sane default\n# s = re.sub('learning_rate_base: [.0-9]+',\n# 'learning_rate_base: {}'.format(\"8e-2\"), s)\n \n # Set warmup_learning_rate in learning_rate, sane default\n s = re.sub('warmup_learning_rate: [.0-9]+',\n 'warmup_learning_rate: {}'.format(.001), s)\n \n # Set warmup_steps in learning_rate, sane default\n s = re.sub('warmup_steps: [.0-9]+',\n 'warmup_steps: {}'.format(2500), s)\n \n # Set total_steps in learning_rate, num_steps\n s = re.sub('total_steps: [0-9]+',\n 'total_steps: {}'.format(num_steps), s)\n \n # Set number of classes num_classes.\n s = re.sub('num_classes: [0-9]+',\n 'num_classes: {}'.format(num_classes), s)\n \n # Setup the data augmentation preprocessor - not sure if this is a good one to use, commenting out for now and going with defaults.\n #s = re.sub('random_scale_crop_and_pad_to_square {\\s+output_size: 896\\s+scale_min: 
0.1\\s+scale_max: 2.0\\s+}',\n # 'random_crop_image {\\n\\tmin_object_covered: 1.0\\n\\tmin_aspect_ratio: 0.75\\n\\tmax_aspect_ratio: 1.5\\n\\tmin_area: 0.25\\n\\tmax_area: 0.875\\n\\toverlap_thresh: 0.5\\n\\trandom_coef: 0.125\\n}',s, flags=re.MULTILINE)\n \n #s = re.sub('ssd_random_crop {\\s+}',\n # 'random_crop_image {\\n\\tmin_object_covered: 1.0\\n\\tmin_aspect_ratio: 0.75\\n\\tmax_aspect_ratio: 1.5\\n\\tmin_area: 0.10\\n\\tmax_area: 0.75\\n\\toverlap_thresh: 0.5\\n\\trandom_coef: 0.125\\n}',s, flags=re.MULTILINE)\n \n # replacing the default data augmentation with something more comprehensive\n # the available options are listed here: https://github.com/tensorflow/models/blob/master/research/object_detection/protos/preprocessor.proto\n \n data_augmentation = (\"data_augmentation_options {\\n random_distort_color: { \\n } \\n}\\n\\n\"\n \"data_augmentation_options {\\n random_horizontal_flip: { \\n } \\n}\\n\\n\"\n \"data_augmentation_options {\\n random_vertical_flip: { \\n } \\n}\\n\\n\"\n \"data_augmentation_options {\\n random_rotation90: { \\n } \\n}\\n\\n\"\n \"data_augmentation_options {\\n random_jitter_boxes: { \\n } \\n}\\n\\n\"\n \"data_augmentation_options {\\n random_crop_image {\\n\\tmin_object_covered: 1.0\\n\\tmin_aspect_ratio: 0.95\\n\\tmax_aspect_ratio: 1.05\\n\\tmin_area: 0.25\\n\\tmax_area: 0.875\\n\\toverlap_thresh: 0.9\\n\\trandom_coef: 0.5\\n}\\n}\\n\\n\"\n \"data_augmentation_options {\\n random_jpeg_quality: {\\n\\trandom_coef: 0.5\\n\\tmin_jpeg_quality: 40\\n\\tmax_jpeg_quality: 90\\n } \\n}\\n\\n\"\n )\n \n s = re.sub('data_augmentation_options {[\\s\\w]*{[\\s\\w\\:\\.]*}\\s*}\\s* data_augmentation_options {[\\s\\w]*{[\\s\\w\\:\\.]*}\\s*}',\n data_augmentation,s, flags=re.MULTILINE)\n \n \n \n #fine-tune checkpoint type\n s = re.sub(\n 'fine_tune_checkpoint_type: \"classification\"', 'fine_tune_checkpoint_type: \"{}\"'.format('detection'), s)\n \n f.write(s)",
"/tf/models/research/deploy\nwriting custom configuration file\n"
],
[
"%cat /tf/models/research/deploy/pipeline_file.config",
" # SSD with EfficientNet-b0 + BiFPN feature extractor,\r\n# shared box predictor and focal loss (a.k.a EfficientDet-d0).\r\n# See EfficientDet, Tan et al, https://arxiv.org/abs/1911.09070\r\n# See Lin et al, https://arxiv.org/abs/1708.02002\r\n# Trained on COCO, initialized from an EfficientNet-b0 checkpoint.\r\n#\r\n# Train on TPU-8\r\n\r\nmodel {\r\n ssd {\r\n inplace_batchnorm_update: true\r\n freeze_batchnorm: false\r\n num_classes: 1\r\n add_background_class: false\r\n box_coder {\r\n faster_rcnn_box_coder {\r\n y_scale: 10.0\r\n x_scale: 10.0\r\n height_scale: 5.0\r\n width_scale: 5.0\r\n }\r\n }\r\n matcher {\r\n argmax_matcher {\r\n matched_threshold: 0.5\r\n unmatched_threshold: 0.5\r\n ignore_thresholds: false\r\n negatives_lower_than_unmatched: true\r\n force_match_for_each_row: true\r\n use_matmul_gather: true\r\n }\r\n }\r\n similarity_calculator {\r\n iou_similarity {\r\n }\r\n }\r\n encode_background_as_zeros: true\r\n anchor_generator {\r\n multiscale_anchor_generator {\r\n min_level: 3\r\n max_level: 7\r\n anchor_scale: 4.0\r\n aspect_ratios: [1.0, 2.0, 0.5]\r\n scales_per_octave: 3\r\n }\r\n }\r\n image_resizer {\r\n keep_aspect_ratio_resizer {\r\n min_dimension: 512\r\n max_dimension: 512\r\n pad_to_max_dimension: true\r\n }\r\n }\r\n box_predictor {\r\n weight_shared_convolutional_box_predictor {\r\n depth: 64\r\n class_prediction_bias_init: -4.6\r\n conv_hyperparams {\r\n force_use_bias: true\r\n activation: SWISH\r\n regularizer {\r\n l2_regularizer {\r\n weight: 0.00004\r\n }\r\n }\r\n initializer {\r\n random_normal_initializer {\r\n stddev: 0.01\r\n mean: 0.0\r\n }\r\n }\r\n batch_norm {\r\n scale: true\r\n decay: 0.99\r\n epsilon: 0.001\r\n }\r\n }\r\n num_layers_before_predictor: 3\r\n kernel_size: 3\r\n use_depthwise: true\r\n }\r\n }\r\n feature_extractor {\r\n type: 'ssd_efficientnet-b0_bifpn_keras'\r\n bifpn {\r\n min_level: 3\r\n max_level: 7\r\n num_iterations: 3\r\n num_filters: 64\r\n }\r\n conv_hyperparams {\r\n force_use_bias: 
true\r\n activation: SWISH\r\n regularizer {\r\n l2_regularizer {\r\n weight: 0.00004\r\n }\r\n }\r\n initializer {\r\n truncated_normal_initializer {\r\n stddev: 0.03\r\n mean: 0.0\r\n }\r\n }\r\n batch_norm {\r\n scale: true,\r\n decay: 0.99,\r\n epsilon: 0.001,\r\n }\r\n }\r\n }\r\n loss {\r\n classification_loss {\r\n weighted_sigmoid_focal {\r\n alpha: 0.25\r\n gamma: 1.5\r\n }\r\n }\r\n localization_loss {\r\n weighted_smooth_l1 {\r\n }\r\n }\r\n classification_weight: 1.0\r\n localization_weight: 1.0\r\n }\r\n normalize_loss_by_num_matches: true\r\n normalize_loc_loss_by_codesize: true\r\n post_processing {\r\n batch_non_max_suppression {\r\n score_threshold: 1e-8\r\n iou_threshold: 0.5\r\n max_detections_per_class: 100\r\n max_total_detections: 100\r\n }\r\n score_converter: SIGMOID\r\n }\r\n }\r\n}\r\n\r\ntrain_config: {\r\n fine_tune_checkpoint: \"/tf/models/research/deploy/efficientdet_d0_coco17_tpu-32/checkpoint/ckpt-0\"\r\n fine_tune_checkpoint_version: V2\r\n fine_tune_checkpoint_type: \"detection\"\r\n batch_size: 18\r\n sync_replicas: true\r\n startup_delay_steps: 0\r\n replicas_to_aggregate: 8\r\n use_bfloat16: true\r\n num_steps: 5000\r\n data_augmentation_options {\r\n random_distort_color: { \r\n } \r\n}\r\n\r\ndata_augmentation_options {\r\n random_horizontal_flip: { \r\n } \r\n}\r\n\r\ndata_augmentation_options {\r\n random_vertical_flip: { \r\n } \r\n}\r\n\r\ndata_augmentation_options {\r\n random_rotation90: { \r\n } \r\n}\r\n\r\ndata_augmentation_options {\r\n random_jitter_boxes: { \r\n } \r\n}\r\n\r\ndata_augmentation_options {\r\n random_crop_image {\r\n\tmin_object_covered: 1.0\r\n\tmin_aspect_ratio: 0.75\r\n\tmax_aspect_ratio: 1.5\r\n\tmin_area: 0.25\r\n\tmax_area: 0.875\r\n\toverlap_thresh: 0.5\r\n\trandom_coef: 0.125\r\n}\r\n}\r\n\r\n\r\n optimizer {\r\n momentum_optimizer: {\r\n learning_rate: {\r\n cosine_decay_learning_rate {\r\n learning_rate_base: 8e-2\r\n total_steps: 5000\r\n warmup_learning_rate: 0.001\r\n warmup_steps: 
2500\r\n }\r\n }\r\n momentum_optimizer_value: 0.9\r\n }\r\n use_moving_average: false\r\n }\r\n max_number_of_boxes: 100\r\n unpad_groundtruth_tensors: false\r\n}\r\n\r\ntrain_input_reader: {\r\n label_map_path: \"/tf/dataset-export/lb-400images-efficientdet-d0-augment-model/label_map.pbtxt\"\r\n tf_record_input_reader {\r\n input_path: \"/tf/dataset-export/lb-400images-efficientdet-d0-augment-model/train/tf.records\"\r\n }\r\n}\r\n\r\neval_config: {\r\n metrics_set: \"coco_detection_metrics\"\r\n use_moving_averages: false\r\n batch_size: 18;\r\n}\r\n\r\neval_input_reader: {\r\n label_map_path: \"/tf/dataset-export/lb-400images-efficientdet-d0-augment-model/label_map.pbtxt\"\r\n shuffle: false\r\n num_epochs: 1\r\n tf_record_input_reader {\r\n input_path: \"/tf/dataset-export/lb-400images-efficientdet-d0-augment-model/val/tf.records\"\r\n }\r\n}\r\n"
]
],
[
[
"# Train Custom TF2 Object Detector\n\nThis step will launch the TF2 Object Detection training. It can take a while to start-up. \nIf you get an error about not finding the GPU, try shutting down the Jupyter kernel and restarting it.\nWhile it is running, it should print out the Current Loss and which Step it is on.\n\n* pipeline_file: defined above in writing custom training configuration\n* model_dir: the location tensorboard logs and saved model checkpoints will save to\n* num_train_steps: how long to train for\n* num_eval_steps: perform eval on validation set after this many steps",
"_____no_output_____"
]
],
[
[
"# 2:48 PM ET Tuesday, May 25, 2021\n!python /tf/models/research/object_detection/model_main_tf2.py \\\n --pipeline_config_path={pipeline_file} \\\n --model_dir={model_dir} \\\n --alsologtostderr \\\n --num_train_steps={num_steps} \\\n --sample_1_of_n_eval_examples=1 \\\n --num_eval_steps={num_eval_steps} ",
"2021-07-08 20:53:56.154660: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-07-08 20:53:59.234024: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1\n2021-07-08 20:53:59.259096: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.259998: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \npciBusID: 0000:00:1e.0 name: Tesla K80 computeCapability: 3.7\ncoreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n2021-07-08 20:53:59.260066: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-07-08 20:53:59.264586: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11\n2021-07-08 20:53:59.264668: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11\n2021-07-08 20:53:59.265942: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcufft.so.10\n2021-07-08 20:53:59.266271: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcurand.so.10\n2021-07-08 20:53:59.267456: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusolver.so.11\n2021-07-08 20:53:59.268525: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusparse.so.11\n2021-07-08 20:53:59.268757: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8\n2021-07-08 20:53:59.268885: I 
tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.269705: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.270442: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0\n2021-07-08 20:53:59.270765: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2021-07-08 20:53:59.271185: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.271986: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \npciBusID: 0000:00:1e.0 name: Tesla K80 computeCapability: 3.7\ncoreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n2021-07-08 20:53:59.272113: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.272913: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.273640: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0\n2021-07-08 20:53:59.273696: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-07-08 20:53:59.833997: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n2021-07-08 20:53:59.834052: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] 0 \n2021-07-08 20:53:59.834078: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1277] 0: N \n2021-07-08 20:53:59.834369: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.835221: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.836071: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-07-08 20:53:59.836837: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1418] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10661 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:1e.0, compute capability: 3.7)\nWARNING:tensorflow:Collective ops is not configured at program startup. Some performance features may not be enabled.\nW0708 20:53:59.839558 139809137821504 mirrored_strategy.py:379] Collective ops is not configured at program startup. 
Some performance features may not be enabled.\nINFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)\nI0708 20:54:00.073076 139809137821504 mirrored_strategy.py:369] Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',)\nINFO:tensorflow:Maybe overwriting train_steps: 40000\nI0708 20:54:00.080240 139809137821504 config_util.py:552] Maybe overwriting train_steps: 40000\nINFO:tensorflow:Maybe overwriting use_bfloat16: False\nI0708 20:54:00.080397 139809137821504 config_util.py:552] Maybe overwriting use_bfloat16: False\nI0708 20:54:00.098092 139809137821504 ssd_efficientnet_bifpn_feature_extractor.py:143] EfficientDet EfficientNet backbone version: efficientnet-b0\nI0708 20:54:00.098223 139809137821504 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet BiFPN num filters: 64\nI0708 20:54:00.098315 139809137821504 ssd_efficientnet_bifpn_feature_extractor.py:146] EfficientDet BiFPN num iterations: 3\nI0708 20:54:00.111051 139809137821504 efficientnet_model.py:147] round_filter input=32 output=32\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nI0708 20:54:00.157430 139809137821504 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nI0708 20:54:00.162168 139809137821504 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nI0708 20:54:00.166090 139809137821504 cross_device_ops.py:621] Reduce to 
/job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nI0708 20:54:00.167571 139809137821504 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nINFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\nI0708 20:54:00.176658 139809137821504 cross_device_ops.py:621] Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).\n"
]
],
[
[
"# Evaluate trained model\nAfter the model has finished training, try running it against some data to see if it atleast works.",
"_____no_output_____"
]
],
[
[
"\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport io, os, glob\nimport scipy.misc\nimport numpy as np\nfrom six import BytesIO\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport tensorflow as tf\n\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import config_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\n\n%matplotlib inline",
"_____no_output_____"
],
[
"def load_image_into_numpy_array(path):\n\n \"\"\"Load an image from file into a numpy array.\n\n Puts image into numpy array to feed into tensorflow graph.\n Note that by convention we put it into a numpy array with shape\n (height, width, channels), where channels=3 for RGB.\n\n Args:\n path: the file path to the image\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3)\n \"\"\"\n img_data = tf.io.gfile.GFile(path, 'rb').read()\n image = Image.open(BytesIO(img_data))\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)",
"_____no_output_____"
],
[
"%ls {model_dir}",
"_____no_output_____"
]
],
[
[
"## Load model from a training checkpoint\nSelect a checkpoint index from above",
"_____no_output_____"
]
],
[
[
"# generally you want to put the last ckpt index from training in here\ncheckpoint_index=41\n\n# recover our saved model\npipeline_config = pipeline_file\n\ncheckpoint = model_dir + \"ckpt-\" + str(checkpoint_index)\nconfigs = config_util.get_configs_from_pipeline_file(pipeline_config)\nmodel_config = configs['model']\ndetection_model = model_builder.build(model_config=model_config, is_training=False)\n\n# Restore checkpoint\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\nckpt.restore(os.path.join(checkpoint)).expect_partial()\n\n\ndef get_model_detection_function(model):\n \"\"\"Get a tf.function for detection.\"\"\"\n\n @tf.function\n def detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = model.preprocess(image)\n prediction_dict = model.predict(image, shapes)\n detections = model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\n return detect_fn\n\ndetect_fn = get_model_detection_function(detection_model)",
"_____no_output_____"
],
[
"# map labels for inference decoding\nlabel_map_path = configs['eval_input_config'].label_map_path\nlabel_map = label_map_util.load_labelmap(label_map_path)\ncategories = label_map_util.convert_label_map_to_categories(\n label_map,\n max_num_classes=label_map_util.get_max_label_map_index(label_map),\n use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\nlabel_map_dict = label_map_util.get_label_map_dict(label_map, use_display_name=True)",
"_____no_output_____"
],
[
"#run detector on test image\n#it takes a little longer on the first run and then runs at normal speed. \nimport random\n\nTEST_IMAGE_PATHS = glob.glob('/tf/media/capture-5-13/Textron Aviation Inc 680A/*.jpg') #/tf/dataset-export/pet/images/keeshond_171.jpg') #'/tf/testing/Dassault Aviation FALCON 2000/*.jpg')\nimage_path = random.choice(TEST_IMAGE_PATHS)\nimage_np = load_image_into_numpy_array(image_path)\n\ninput_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n\ndetections, predictions_dict, shapes = detect_fn(input_tensor)\n\n\nprint(detections['detection_scores'])\nlabel_id_offset = 1 # Depending on whether your LabelMap starts at 0 or 1\nimage_np_with_detections = image_np.copy()\n\nviz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'][0].numpy(),\n (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),\n detections['detection_scores'][0].numpy(),\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=200,\n min_score_thresh=.2,\n agnostic_mode=False,\n)\n\nplt.figure(figsize=(20,25))\nplt.imshow(image_np_with_detections)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Export the model\nWhen you have a working model, use the TF2 Object Detection API to export it to a saved model.",
"_____no_output_____"
],
[
"### Export a Saved Model that uses Image Tensors",
"_____no_output_____"
]
],
[
[
"image_tensor_model_export_dir = model_export_dir + \"image_tensor_saved_model\"",
"_____no_output_____"
],
[
"print(image_tensor_model_export_dir)",
"/tf/model-export/lb-400images-efficientdet-d0-augment-model/image_tensor_saved_model\n"
],
[
"!python /tf/models/research/object_detection/exporter_main_v2.py \\\n --input_type image_tensor \\\n --trained_checkpoint_dir={model_dir} \\\n --pipeline_config_path={pipeline_file} \\\n --output_directory {image_tensor_model_export_dir}",
"2021-06-28 23:00:37.233618: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-06-28 23:00:39.839076: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcuda.so.1\n2021-06-28 23:00:39.864436: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.865310: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \npciBusID: 0000:00:1e.0 name: Tesla K80 computeCapability: 3.7\ncoreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n2021-06-28 23:00:39.865362: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-06-28 23:00:39.869447: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublas.so.11\n2021-06-28 23:00:39.869533: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcublasLt.so.11\n2021-06-28 23:00:39.870937: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcufft.so.10\n2021-06-28 23:00:39.871318: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcurand.so.10\n2021-06-28 23:00:39.872613: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusolver.so.11\n2021-06-28 23:00:39.873689: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcusparse.so.11\n2021-06-28 23:00:39.873938: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudnn.so.8\n2021-06-28 23:00:39.874103: I 
tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.874960: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.875740: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0\n2021-06-28 23:00:39.876140: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\nTo enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2021-06-28 23:00:39.876588: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.877376: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties: \npciBusID: 0000:00:1e.0 name: Tesla K80 computeCapability: 3.7\ncoreClock: 0.8235GHz coreCount: 13 deviceMemorySize: 11.17GiB deviceMemoryBandwidth: 223.96GiB/s\n2021-06-28 23:00:39.877497: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.878292: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:39.879020: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0\n2021-06-28 23:00:39.879080: I 
tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n2021-06-28 23:00:40.521033: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:\n2021-06-28 23:00:40.521120: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] 0 \n2021-06-28 23:00:40.521143: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1277] 0: N \n2021-06-28 23:00:40.521465: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:40.522340: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:40.523146: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2021-06-28 23:00:40.523906: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1418] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10661 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:1e.0, compute capability: 3.7)\nI0628 23:00:40.785022 140532255090496 ssd_efficientnet_bifpn_feature_extractor.py:143] EfficientDet EfficientNet backbone version: efficientnet-b0\nI0628 23:00:40.785330 140532255090496 ssd_efficientnet_bifpn_feature_extractor.py:144] EfficientDet BiFPN num filters: 64\nI0628 23:00:40.785445 140532255090496 ssd_efficientnet_bifpn_feature_extractor.py:146] EfficientDet BiFPN num iterations: 3\nI0628 23:00:40.792782 140532255090496 efficientnet_model.py:147] round_filter input=32 output=32\nI0628 23:00:40.825772 140532255090496 efficientnet_model.py:147] round_filter input=32 output=32\nI0628 
23:00:40.825912 140532255090496 efficientnet_model.py:147] round_filter input=16 output=16\nI0628 23:00:40.894950 140532255090496 efficientnet_model.py:147] round_filter input=16 output=16\nI0628 23:00:40.895127 140532255090496 efficientnet_model.py:147] round_filter input=24 output=24\nI0628 23:00:41.066718 140532255090496 efficientnet_model.py:147] round_filter input=24 output=24\nI0628 23:00:41.066934 140532255090496 efficientnet_model.py:147] round_filter input=40 output=40\nI0628 23:00:41.239912 140532255090496 efficientnet_model.py:147] round_filter input=40 output=40\nI0628 23:00:41.240132 140532255090496 efficientnet_model.py:147] round_filter input=80 output=80\nI0628 23:00:41.503206 140532255090496 efficientnet_model.py:147] round_filter input=80 output=80\nI0628 23:00:41.503429 140532255090496 efficientnet_model.py:147] round_filter input=112 output=112\nI0628 23:00:41.763689 140532255090496 efficientnet_model.py:147] round_filter input=112 output=112\nI0628 23:00:41.763906 140532255090496 efficientnet_model.py:147] round_filter input=192 output=192\nI0628 23:00:42.249581 140532255090496 efficientnet_model.py:147] round_filter input=192 output=192\nI0628 23:00:42.249801 140532255090496 efficientnet_model.py:147] round_filter input=320 output=320\nI0628 23:00:42.333547 140532255090496 efficientnet_model.py:147] round_filter input=1280 output=1280\nI0628 23:00:42.368739 140532255090496 efficientnet_model.py:458] Building model efficientnet with params ModelConfig(width_coefficient=1.0, depth_coefficient=1.0, resolution=224, dropout_rate=0.2, blocks=(BlockConfig(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=24, output_filters=40, 
kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise'), BlockConfig(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25, id_skip=True, fused_conv=False, conv_type='depthwise')), stem_base_filters=32, top_base_filters=1280, activation='simple_swish', batch_norm='default', bn_momentum=0.99, bn_epsilon=0.001, weight_decay=5e-06, drop_connect_rate=0.2, depth_divisor=8, min_depth=None, use_se=True, input_channels=3, num_classes=1000, model_name='efficientnet', rescale_input=False, data_format='channels_last', dtype='float32')\n"
]
],
[
[
"### Export a Saved Model that uses TF Examples",
"_____no_output_____"
]
],
[
[
"# Ignore for now - we do not need to use the TF Example approach.\n\n#tf_example_model_export_dir = model_export_dir + \"tf_example_saved_model\"",
"_____no_output_____"
],
[
"#!python /tf/models/research/object_detection/exporter_main_v2.py \\\n# --input_type=tf_example \\\n# --trained_checkpoint_dir={model_dir} \\\n# --pipeline_config_path={pipeline_file} \\\n# --output_directory {tf_example_model_export_dir}",
"_____no_output_____"
]
],
[
[
"### Export a TFLite compatible model\nRemeber that only Detection models that use SSDs are supported",
"_____no_output_____"
]
],
[
[
"!python /tf/models/research/object_detection/export_tflite_graph_tf2.py \\\n --pipeline_config_path={pipeline_file} \\\n --trained_checkpoint_dir={model_dir} \\\n --output_directory={model_export_dir}tflite-compatible\n",
"_____no_output_____"
],
[
"# I think we skip this step...\n\n#! tflite_convert \\\n# --saved_model_dir=\"{model_export_dir}tflite-compatible/saved_model\" \\\n# --output_file=\"{model_export_dir}output.tflite\"",
"_____no_output_____"
],
[
"#https://github.com/tensorflow/models/issues/9033#issuecomment-706573546\nimport cv2\nimport glob\nimport numpy as np\n\ntrain_images = []\n\ndef representative_data_gen():\n path = '/tf/testing/Airbus A319-115'\n\n dataset_list = tf.data.Dataset.list_files(path + '/*.jpg')\n for i in range(100):\n image = next(iter(dataset_list))\n image = tf.io.read_file(image)\n image = tf.io.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [300, 300])\n image = tf.cast(image / 255., tf.float32)\n image = tf.expand_dims(image, 0)\n yield [image]\n\n\nconverter = tf.lite.TFLiteConverter.from_saved_model(model_export_dir+\"tflite-compatible/saved_model\")\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8,\n tf.lite.OpsSet.TFLITE_BUILTINS]\n#converter.optimizations = [tf.lite.Optimize.DEFAULT]\n#converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS]\n#converter.inference_input_type = tf.int8\n#converter.inference_output_type = tf.int8\nconverter.representative_dataset = representative_data_gen\n# Ensure that if any ops can't be quantized, the converter throws an error\n#converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n# Set the input and output tensors to uint8 (APIs added in r2.3)\n#converter.inference_input_type = tf.uint8\n#converter.inference_output_type = tf.uint8\ntflite_model = converter.convert()\n\n# Save the model.\nwith open(model_export_dir+'model.tflite', 'wb') as f:\n f.write(tflite_model)",
"_____no_output_____"
],
[
"!curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -\n!echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | tee /etc/apt/sources.list.d/coral-edgetpu.list\n!apt-get update\n!apt-get -y install edgetpu-compiler",
"_____no_output_____"
],
[
"!edgetpu_compiler -s {model_export_dir}model.tflite -o {model_export_dir}",
"_____no_output_____"
]
],
[
[
"### Export a TensorJS compatible model\nFrom: https://www.tensorflow.org/js/tutorials/conversion/import_saved_model",
"_____no_output_____"
]
],
[
[
"!pip install tensorflowjs",
"_____no_output_____"
],
[
"! tensorflowjs_converter \\\n --input_format=tf_saved_model \\\n {model_export_dir}image_tensor_saved_model/saved_model \\\n {model_export_dir}web_model",
"_____no_output_____"
],
[
"!saved_model_cli show --dir /tf/models/research/deploy/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model --all",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79a545ceaa387f2c467ff933371faeafb18b86a | 1,163 | ipynb | Jupyter Notebook | jubox/test/test_files/nb_unrun.ipynb | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
] | 1 | 2020-04-26T05:18:45.000Z | 2020-04-26T05:18:45.000Z | jubox/test/test_files/nb_unrun.ipynb | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
] | null | null | null | jubox/test/test_files/nb_unrun.ipynb | Miksus/jubox | daaf1e223e0a7c0a3bf9ae03b88d629c0f99d4d5 | [
"MIT"
] | null | null | null | 17.102941 | 41 | 0.503009 | [
[
[
"print(\"This notebook is not run\")",
"_____no_output_____"
],
[
"x = \"foo\"",
"_____no_output_____"
],
[
"y = \"bar\"",
"_____no_output_____"
],
[
"x + y",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e79a5e3692858c5f1f01e295b17cf2a2a53bb0f9 | 148,090 | ipynb | Jupyter Notebook | Homeworks/Homework1/VANGUMALLI-D-python-challenge-04-sept-2017.ipynb | DineshVangumalli/big-data-python-class | e986308803637ea461eb74635f46dc5fa035df11 | [
"MIT"
] | null | null | null | Homeworks/Homework1/VANGUMALLI-D-python-challenge-04-sept-2017.ipynb | DineshVangumalli/big-data-python-class | e986308803637ea461eb74635f46dc5fa035df11 | [
"MIT"
] | null | null | null | Homeworks/Homework1/VANGUMALLI-D-python-challenge-04-sept-2017.ipynb | DineshVangumalli/big-data-python-class | e986308803637ea461eb74635f46dc5fa035df11 | [
"MIT"
] | null | null | null | 120.496338 | 74,990 | 0.830279 | [
[
[
"# Python Homework 1 - The challenge\n\nTake the python challenge found on www.pythonchallenge.com/.\nYou will copy this notebook. Rename it as:\nYOURLASTNAME-FIRSTINITIAL-python-challenge-xx-Sept-2017\n\nwith your name replacing your last name and first initial and the xx replaced by the date you started or submitted.\n\nDo the first 10 challenges and put each question and your python solution and the final resulting url in this notebook or a series of connected notebooks.\nDiscuss your attempt what parts of python were you using.\n\nUpload your completed jupyter notebook zip file to elearning site as your homework submission. Do not put this notebook on your github.\n\n Note: 3 points for 10 correct answers... 4 points for 15 and 5 points for all 33 ",
"_____no_output_____"
],
[
"\n### Python challenge question 1",
"_____no_output_____"
],
[
"This challenge is straight forward. It hints to change URL and so I tried to change the URL to \"http://www.pythonchallenge.com/pc/def/1.html\". The URL showed the message \"2**38 is much much larger\", from which I got the clue.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/0.html\n\nprint(2**38)\nprint(pow(2,38))\n\n#http://www.pythonchallenge.com/pc/def/map.html",
"274877906944\n274877906944\n"
]
],
[
[
"### Python challenge question 2",
"_____no_output_____"
],
[
"I changed the URL to \"http://www.pythonchallenge.com/pc/def/274877906944.html\" which redirected to \"http://www.pythonchallenge.com/pc/def/map.html\". This challenge has a picture with letters on i and a text below it. It can be seen that the letters on the right are two characters after the letters on the left, (K->M, O->Q, E->G) and so got the clue that all the letters in the text must be moved by 2 letters. I googled \"mapping characters in python\" and found that there is a function called \"str.maketrans\" in the library \"string\". ",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/274877906944.html\n#http://www.pythonchallenge.com/pc/def/map.html\n\nimport string\n\ninp=\"abcdefghijklmnopqrstuvwxyz\"\noutp=\"cdefghijklmnopqrstuvwxyzab\"\ntrans=str.maketrans(inp, outp)\n\nstrg = \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\nprint(strg.translate(trans))\n\nprint (\"map\".translate(trans)) #Apply the function on \"map\"\n\n#http://www.pythonchallenge.com/pc/def/ocr.html",
"i hope you didnt translate it by hand. thats what computers are for. doing it in by hand is inefficient and that's why this text is so long. using string.maketrans() is recommended. now apply on the url.\nocr\n"
]
],
[
[
"### Python challenge question 3",
"_____no_output_____"
],
[
"I then changed the URL to \"http://www.pythonchallenge.com/pc/def/ocr.html\". This challenge shows a picture of a book and it says to recognize the characters, giving a clue to check the page source. When I checked it, I found big block of characters in the page source which I thought should be read into python using \"urllib\" library. There are already functions that look for specific patterns using regular expressions.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/ocr.html\n\nimport urllib.request\nurl_ocr = urllib.request.urlopen(\"http://www.pythonchallenge.com/pc/def/ocr.html\").read().decode()\n#print(url_ocr)\n\nimport re\ncontent = re.findall(\"<!--(.*?)-->\", url_ocr, re.S)[-1] #findall() matches all occurrences of a pattern\n #re.S makes the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline.\nprint(re.findall(\"[A-Za-z]\", content)) \n\n#http://www.pythonchallenge.com/pc/def/equality.html",
"['e', 'q', 'u', 'a', 'l', 'i', 't', 'y']\n"
]
],
[
[
"### Python challenge question 4",
"_____no_output_____"
],
[
"Then, I changed the URL to \"http://www.pythonchallenge.com/pc/def/equality.html\" and there is text which says \"One small letter, surrounded by EXACTLY three big bodyguards on each of its sides\". I checked the page source to find any other clues and there is a big block of text, just as previous challenge. \n\nWith a little thought, I guessed that I should look for characters in the pattern of \"one small letter\" surrounded by \"three capital letters\" on both the sides. ",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/equality.html\n\nimport urllib.request\nurl_eq = urllib.request.urlopen(\"http://www.pythonchallenge.com/pc/def/equality.html\").read().decode()\n\nimport re\ndata = re.findall(\"<!--(.*?)-->\", url_eq, re.S)[-1] #findall() matches all occurrences of a pattern\n #re.S makes the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline.\n\nprint(re.findall(\"[^A-Z]+[A-Z]{3}([a-z])[A-Z]{3}[^A-Z]+\", data))\n#The first part is ^A-Z means that the first character should be anything but a capital A through Z. The next three characters must be a capital letter A thorugh Z, denoted by the {3}. \n#The next element must be a lower case a through z. Again three more upper case A through Z elements. And finally the first part repeats. \n\nprint(\"\".join(re.findall(\"[^A-Z]+[A-Z]{3}([a-z])[A-Z]{3}[^A-Z]+\", data))) #Joins the list with no space inbetween.\n\n#http://www.pythonchallenge.com/pc/def/linkedlist.php",
"['l', 'i', 'n', 'k', 'e', 'd', 'l', 'i', 's', 't']\nlinkedlist\n"
]
],
[
[
"### Python challenge question 5",
"_____no_output_____"
],
[
"For the next challenge, I changed the URL to \"http://www.pythonchallenge.com/pc/def/linkedlist.html\" but it showed text \"linkedlist.php\". So, I changed the URL to \"http://www.pythonchallenge.com/pc/def/linkedlist.php\". When I checked the page source, it has \"urllib may help. DON'T TRY ALL NOTHINGS, since it will never end. 400 times is more than enough.\"\n\nThere is another link \"linkedlist.php?nothing=12345\", which when I clicked took me to a page with text \"and the next nothing is 44827\". I tried to chage the \"next nothing\" on the URL to the numbers it suggested, it gave me other numbers. \n\nI thought that this is about web pages, and from the text it showed, I guessed there are a lot of webpages. It also gave a clue to use urllib.\n\nI tried to print all the numbers it was generating and at one point, it stopped at \"and the next nothing is 16044\" and \n\"Yes. Divide by two and keep going\". So, I divided the number 16044 by 2 and kept on printing until it gave \"peak.html\". ",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/linkedlist.php\n\nimport urllib\nimport re\n\nurl_ll = (\"http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=%s\")\nnum=\"12345\"\n#num=16044/2\n\nwhile num!=\"\":\n data = urllib.request.urlopen(url_ll % num).read().decode()\n #print(data)\n num = \"\".join(re.findall(\"and the next nothing is (\\d+)\",data))\nelse :\n print(\"Came to an End\") \n \nnum=16044/2\n\nwhile num!=\"\":\n data = urllib.request.urlopen(url_ll % num).read().decode()\n print(data)\n num = \"\".join(re.findall(\"and the next nothing is (\\d+)\",data))\nelse :\n print(\"Came to an End\")\n \n#http://www.pythonchallenge.com/pc/def/peak.html",
"Came to an End\nand the next nothing is 25357\nand the next nothing is 89879\nand the next nothing is 80119\nand the next nothing is 50290\nand the next nothing is 9297\nand the next nothing is 30571\nand the next nothing is 7414\nand the next nothing is 30978\nand the next nothing is 16408\nand the next nothing is 80109\nand the next nothing is 55736\nand the next nothing is 15357\nand the next nothing is 80887\nand the next nothing is 35014\nand the next nothing is 16523\nand the next nothing is 50286\nand the next nothing is 34813\nand the next nothing is 77562\nand the next nothing is 54746\nand the next nothing is 22680\nand the next nothing is 19705\nand the next nothing is 77000\nand the next nothing is 27634\nand the next nothing is 21008\nand the next nothing is 64994\nand the next nothing is 66109\nand the next nothing is 37855\nand the next nothing is 36383\nand the next nothing is 68548\nand the next nothing is 96070\nand the next nothing is 83051\nand the next nothing is 58026\nand the next nothing is 44726\nand the next nothing is 35748\nand the next nothing is 61287\nand the next nothing is 559\nand the next nothing is 81318\nand the next nothing is 50443\nand the next nothing is 1570\nand the next nothing is 75244\nand the next nothing is 56265\nand the next nothing is 17694\nand the next nothing is 48033\nand the next nothing is 56523\nand the next nothing is 51253\nand the next nothing is 85750\nand the next nothing is 42760\nand the next nothing is 11877\nand the next nothing is 15962\nand the next nothing is 75494\nand the next nothing is 87283\nand the next nothing is 40396\nand the next nothing is 49574\nand the next nothing is 82682\nThere maybe misleading numbers in the \ntext. One example is 82683. 
Look only for the next nothing and the next nothing is 63579\nand the next nothing is 37278\nand the next nothing is 53548\nand the next nothing is 66081\nand the next nothing is 67753\nand the next nothing is 56337\nand the next nothing is 3356\nand the next nothing is 94525\nand the next nothing is 89574\nand the next nothing is 4413\nand the next nothing is 82294\nand the next nothing is 56060\nand the next nothing is 95493\nand the next nothing is 80865\nand the next nothing is 66242\nand the next nothing is 16065\nand the next nothing is 62145\nand the next nothing is 23147\nand the next nothing is 83763\nand the next nothing is 62381\nand the next nothing is 76841\nand the next nothing is 91706\nand the next nothing is 9268\nand the next nothing is 64814\nand the next nothing is 80809\nand the next nothing is 14039\nand the next nothing is 73355\nand the next nothing is 81905\nand the next nothing is 36402\nand the next nothing is 27221\nand the next nothing is 79607\nand the next nothing is 91763\nand the next nothing is 11631\nand the next nothing is 76396\nand the next nothing is 69905\nand the next nothing is 11073\nand the next nothing is 71281\nand the next nothing is 54345\nand the next nothing is 19047\nand the next nothing is 34376\nand the next nothing is 3193\nand the next nothing is 74258\nand the next nothing is 62712\nand the next nothing is 1823\nand the next nothing is 21232\nand the next nothing is 87890\nand the next nothing is 21545\nand the next nothing is 37136\nand the next nothing is 23060\nand the next nothing is 5385\nand the next nothing is 4620\nand the next nothing is 39111\nand the next nothing is 35914\nand the next nothing is 60310\nand the next nothing is 19178\nand the next nothing is 44671\nand the next nothing is 45736\nand the next nothing is 9216\nand the next nothing is 12585\nand the next nothing is 11302\nand the next nothing is 33096\nand the next nothing is 13967\nand the next nothing is 57004\nand the next nothing is 
64196\nand the next nothing is 73929\nand the next nothing is 24800\nand the next nothing is 25081\nand the next nothing is 90033\nand the next nothing is 45919\nand the next nothing is 54827\nand the next nothing is 73950\nand the next nothing is 56978\nand the next nothing is 8133\nand the next nothing is 61900\nand the next nothing is 47769\nand the next nothing is 631\nand the next nothing is 2284\nand the next nothing is 60074\nand the next nothing is 35959\nand the next nothing is 57158\nand the next nothing is 90990\nand the next nothing is 27935\nand the next nothing is 99927\nand the next nothing is 41785\nand the next nothing is 32660\nand the next nothing is 4328\nand the next nothing is 42067\nand the next nothing is 8743\nand the next nothing is 38613\nand the next nothing is 21100\nand the next nothing is 77864\nand the next nothing is 6523\nand the next nothing is 6927\nand the next nothing is 82930\nand the next nothing is 35846\nand the next nothing is 31785\nand the next nothing is 41846\nand the next nothing is 72387\nand the next nothing is 59334\nand the next nothing is 65520\nand the next nothing is 93781\nand the next nothing is 55840\nand the next nothing is 80842\nand the next nothing is 59022\nand the next nothing is 23298\nand the next nothing is 27709\nand the next nothing is 96791\nand the next nothing is 75635\nand the next nothing is 52899\nand the next nothing is 66831\npeak.html\nCame to an End\n"
]
],
[
[
"### Python challenge question 6",
"_____no_output_____"
],
[
"For the next challenge, I changed the URL to \"http://www.pythonchallenge.com/pc/def/peak.html\" which showed a picture of a hill with the text “pronounce it”. When I checked the page source, it showed some text \"peak hell sounds familiar ?\" and a file named \"banner.p\" which again took me to \"http://www.pythonchallenge.com/pc/def/banner.p\" and it has some text in it.\n\nI changed the URL to \"peakhell.html\" but nothing showed up. I googled \"peakhell\" and there were results regarding the Python Challenge itself and found that it was refering to a Python object serialization module called \"pickle\".So,, I changed the URL to \"http://www.pythonchallenge.com/pc/def/pickle.html\" and it showed a text \"yes! pickle!\" which confirmed the usage of module \"pickle\".\n\nI learnt quiet a few concepts regarding pickling when I googled \".p files in Python\". \"Pickle\" is used for serializing and de-serializing a Python object structure. Any object in python can be pickled. It “serialises” the object first before writing it to file. Pickling is a way to convert a python object (list, dict, etc.) into a character stream. The idea is that this character stream contains all the information necessary to reconstruct the object in another python script.\n\nWhen I checked the URL \"banner.p\", it showed a text which looked like output of something that has been pickled. \nI used \"urllib\" and \"pickle\" to load the file. When I checked the file, there are a list of tuples. It's like a character and the number of times it is repeated.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/peak.html\n\nimport urllib.request\nurl_ban = urllib.request.urlopen(\"http://www.pythonchallenge.com/pc/def/banner.p\")\n\nimport pickle\ndata = pickle.load(url_ban) #Reads a pickled object representation from the open file object given in the constructor, and return the reconstituted object hierarchy specified therein. \n#print(data) #Printed a list of tuples.\n\nfor row in data:\n print(\"\".join([r[1] * r[0] for r in row]))\n \n#http://www.pythonchallenge.com/pc/def/channel.html",
" \n ##### ##### \n #### #### \n #### #### \n #### #### \n #### #### \n #### #### \n #### #### \n #### #### \n ### #### ### ### ##### ### ##### ### ### #### \n ### ## #### ####### ## ### #### ####### #### ####### ### ### #### \n ### ### ##### #### ### #### ##### #### ##### #### ### ### #### \n ### #### #### ### ### #### #### #### #### ### #### #### \n ### #### #### ### #### #### #### #### ### ### #### \n#### #### #### ## ### #### #### #### #### #### ### #### \n#### #### #### ########## #### #### #### #### ############## #### \n#### #### #### ### #### #### #### #### #### #### #### \n#### #### #### #### ### #### #### #### #### #### #### \n ### #### #### #### ### #### #### #### #### ### #### \n ### ## #### #### ### #### #### #### #### #### ### ## #### \n ### ## #### #### ########### #### #### #### #### ### ## #### \n ### ###### ##### ## #### ###### ########### ##### ### ######\n \n"
]
],
[
[
"### Python challenge question 7",
"_____no_output_____"
],
[
"For the next challenge, I changed the URL to \"http://www.pythonchallenge.com/pc/def/channel.html\" and it showed a picture of a zipper and I felt it is something related to zip files. When I checked page source, it showed some text. I changed the URl to \".zip\" and got a file with a lot of text files. I checked a couple of them, it shoed some text. I found a readme file at the end, which when I checked, has text as shown below: \n\nWelcome to my zipped list.\nhint1: start from 90052\nhint2: answer is inside the zip\n\nI did some reading regarding \"zipfile\" module. Upon opening 90052.txt it said “Next nothing is 94191”. I tried to print the content in the text file and it stopped at a point and asked to collect the comments inside the zip file.\nWhen I printed that out the message, it said \"HOCKEY\".",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/channel.html\n\nimport urllib\nimport zipfile\nimport re\n\nurl_ll = \"http://www.pythonchallenge.com/pc/def/channel.html\"\nzf = zipfile.ZipFile(\"channel.zip\", 'r')\nprint(zf.read(\"readme.txt\").decode())\n\nnum = \"90052\"\ncomments = \"\"\nwhile num != \"\" :\n data = zf.read(num + \".txt\").decode()\n comments += zf.getinfo(num+\".txt\").comment.decode()\n num = \"\".join(re.findall(\"Next nothing is (\\d+)\",data))\n #print(data)\nelse :\n print(data)\n \nprint(comments)\n\n#http://www.pythonchallenge.com/pc/def/oxygen.html",
"welcome to my zipped list.\n\nhint1: start from 90052\nhint2: answer is inside the zip\n\nCollect the comments.\n****************************************************************\n****************************************************************\n** **\n** OO OO XX YYYY GG GG EEEEEE NN NN **\n** OO OO XXXXXX YYYYYY GG GG EEEEEE NN NN **\n** OO OO XXX XXX YYY YY GG GG EE NN NN **\n** OOOOOOOO XX XX YY GGG EEEEE NNNN **\n** OOOOOOOO XX XX YY GGG EEEEE NN **\n** OO OO XXX XXX YYY YY GG GG EE NN **\n** OO OO XXXXXX YYYYYY GG GG EEEEEE NN **\n** OO OO XX YYYY GG GG EEEEEE NN **\n** **\n****************************************************************\n **************************************************************\n\n"
]
],
[
[
"### Python challenge question 8",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/def/hockey.html\" but it gave a text saying \"it's in the air. look at the letters\". Then, I tried \"http://www.pythonchallenge.com/pc/def/oxygen.html\". It gave a picture in which the center of the picture was grey scaled from left. So, there might be something encoded in this line. I did some work on image analysis previously and so was quick enough to get the clue for this challenge.\n\nWe can get the pixels using Python Image Library, PIL. I printed the pixel values alon the width of the image at exactly half point of the height because that's where the image is grey scaled. The pixel values will be same for R,G,B if the image is greyscaled. The values of the pixels changed after 7 blocks, on observing the pixel values along the width. \n\nIt gave a clue in the form of \"smart guy, you made it. the next level is [105, 110, 116, 101, 103, 114, 105, 116, 121]\". The blocks were 7 pixels wide and so I took out the first number in the color given to me and printed string representing a character.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/oxygen.html\n\nimport urllib.request\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\n\nurl = \"http://www.pythonchallenge.com/pc/def/oxygen.png\"\n\nimg_oxy = requests.get(url)\nimg = Image.open(BytesIO(img_oxy.content))\n\nwidth,height = img.size\nprint(width)\nprint(height)\n#for w in range(width):\n# print(img.getpixel((w,height/2))) #Prints the pixel values at the greyscale along the width of the image.\n\nfor w in range(0,width,7):\n print(chr(img.getpixel((w,height/2))[0]), end='') #Return the string representing a character whose Unicode code point is the integer.\n \nprint(''.join(map(chr, [105, 110, 116, 101, 103, 114, 105, 116, 121])))\n\n#http://www.pythonchallenge.com/pc/def/integrity.html",
"629\n95\nsmart guy, you made it. the next level is [105, 110, 116, 101, 103, 114, 105, 116, 121]pe_integrity\n"
]
],
[
[
"### Python challenge question 9",
"_____no_output_____"
],
[
"For the next challenge, I changed the URL to \"http://www.pythonchallenge.com/pc/def/integrity.html\" which showed a picture of a bee with text \"Where is the missing link?\". It seemed the bee is clickable and when clicked, it asked for a a userame and password.\n\nAlso, when I checked page source, there was a text:\n\n\"un: 'BZh91AY&SYA\\xaf\\x82\\r\\x00\\x00\\x01\\x01\\x80\\x02\\xc0\\x02\\x00 \\x00!\\x9ah3M\\x07<]\\xc9\\x14\\xe1BA\\x06\\xbe\\x084' and\npw: 'BZh91AY&SY\\x94$|\\x0e\\x00\\x00\\x00\\x81\\x00\\x03$ \\x00!\\x9ah3M\\x13<]\\xc9\\x14\\xe1BBP\\x91\\xf08'\"\n\nI googled regarding this challenge as I had no idea what it's asking to do. Quickly, I found words \"BZ2\" and so googled regarding \"bz2 in Python\" and had briefly studied regarding what it does. I got a hunch that the strings that were in page source might need to be decompressed. When I did it, I got 'huge' and 'file', which are username and password. ",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/def/integrity.html\n\nimport bz2\n\nusr = b\"BZh91AY&SYA\\xaf\\x82\\r\\x00\\x00\\x01\\x01\\x80\\x02\\xc0\\x02\\x00 \\x00!\\x9ah3M\\x07<]\\xc9\\x14\\xe1BA\\x06\\xbe\\x084\"\npwd = b\"BZh91AY&SY\\x94$|\\x0e\\x00\\x00\\x00\\x81\\x00\\x03$ \\x00!\\x9ah3M\\x13<]\\xc9\\x14\\xe1BBP\\x91\\xf08\"\n\nprint(bz2.BZ2Decompressor().decompress(usr)) #Decompress data (a bytes-like object), returns uncompressed data as bytes.\nprint(bz2.BZ2Decompressor().decompress(pwd))\n\n#http://www.pythonchallenge.com/pc/return/good.html",
"b'huge'\nb'file'\n"
]
],
[
[
"### Python challenge question 10",
"_____no_output_____"
],
[
"For this challenge, I gave username and password previously obtained which took me to URL \"http://www.pythonchallenge.com/pc/return/good.html\". It has a picture of a stem with black dots. It seemed like we need to connect the dots to get the answer. Looking at page source, my intuition is correct that we need to connect dots and there are lists with numbers for 'first' and 'second'. Also, \"first+second=?\" seemed like a clue. So, I joined first and second and tried to draw an image with size mentioned i page source, 640 by 480. I got an image of bull.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/good.html\n\nfrom PIL import Image, ImageDraw\n\nfirst=[\n146,399,163,403,170,393,169,391,166,386,170,381,170,371,170,355,169,346,167,335,170,329,170,320,170,\n310,171,301,173,290,178,289,182,287,188,286,190,286,192,291,194,296,195,305,194,307,191,312,190,316,\n190,321,192,331,193,338,196,341,197,346,199,352,198,360,197,366,197,373,196,380,197,383,196,387,192,\n389,191,392,190,396,189,400,194,401,201,402,208,403,213,402,216,401,219,397,219,393,216,390,215,385,\n215,379,213,373,213,365,212,360,210,353,210,347,212,338,213,329,214,319,215,311,215,306,216,296,218,\n290,221,283,225,282,233,284,238,287,243,290,250,291,255,294,261,293,265,291,271,291,273,289,278,287,\n279,285,281,280,284,278,284,276,287,277,289,283,291,286,294,291,296,295,299,300,301,304,304,320,305,\n327,306,332,307,341,306,349,303,354,301,364,301,371,297,375,292,384,291,386,302,393,324,391,333,387,\n328,375,329,367,329,353,330,341,331,328,336,319,338,310,341,304,341,285,341,278,343,269,344,262,346,\n259,346,251,349,259,349,264,349,273,349,280,349,288,349,295,349,298,354,293,356,286,354,279,352,268,\n352,257,351,249,350,234,351,211,352,197,354,185,353,171,351,154,348,147,342,137,339,132,330,122,327,\n120,314,116,304,117,293,118,284,118,281,122,275,128,265,129,257,131,244,133,239,134,228,136,221,137,\n214,138,209,135,201,132,192,130,184,131,175,129,170,131,159,134,157,134,160,130,170,125,176,114,176,\n102,173,103,172,108,171,111,163,115,156,116,149,117,142,116,136,115,129,115,124,115,120,115,115,117,\n113,120,109,122,102,122,100,121,95,121,89,115,87,110,82,109,84,118,89,123,93,129,100,130,108,132,110,\n133,110,136,107,138,105,140,95,138,86,141,79,149,77,155,81,162,90,165,97,167,99,171,109,171,107,161,\n111,156,113,170,115,185,118,208,117,223,121,239,128,251,133,259,136,266,139,276,143,290,148,310,151,\n332,155,348,156,353,153,366,149,379,147,394,146,399]\n\nsecond=[\n156,141,165,135,169,131,176,130,187,134,191,140,191,146,186,150,179,155,175,157,168,157,163,157,15
9,\n157,158,164,159,175,159,181,157,191,154,197,153,205,153,210,152,212,147,215,146,218,143,220,132,220,\n125,217,119,209,116,196,115,185,114,172,114,167,112,161,109,165,107,170,99,171,97,167,89,164,81,162,\n77,155,81,148,87,140,96,138,105,141,110,136,111,126,113,129,118,117,128,114,137,115,146,114,155,115,\n158,121,157,128,156,134,157,136,156,136]\n\nall_d= first + second\n\nimg = Image.new(\"RGB\", (640,480), \"rgb(60%,60%,90%)\")\npic = ImageDraw.Draw(img)\npic.line(all_d, fill='black')\nimg\n\n#http://www.pythonchallenge.com/pc/return/bull.html",
"_____no_output_____"
]
],
[
[
"### Python challenge question 11",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/bull.html\" and it showed a picture of a bull. In text below it says ‘len(a[30]) = ?’. When I clicked the bull, which is clickable, a new page shoed a sequence ‘a = [1, 11, 21, 1211, 111221,..]'. When I googled this sequence, I came to know that it is called the look and say sequence. With little study about it, I got to know about it and it seemed that, in the challenge, we need to find the length of the 30th element in the sequence.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/bull.html\n\nfrom itertools import groupby\n\ndef lookandsay(n):\n return (''.join(str(len(list(g))) + k\n for k,g in groupby(n)))\n \nn='1'\nfor i in range(30):\n print(\"Term\", i,\"--\", n)\n n = lookandsay(n)\n\ntype(n)\nlen(n)\n\n#http://www.pythonchallenge.com/pc/return/5808.html",
"Term 0 -- 1\nTerm 1 -- 11\nTerm 2 -- 21\nTerm 3 -- 1211\nTerm 4 -- 111221\nTerm 5 -- 312211\nTerm 6 -- 13112221\nTerm 7 -- 1113213211\nTerm 8 -- 31131211131221\nTerm 9 -- 13211311123113112211\nTerm 10 -- 11131221133112132113212221\nTerm 11 -- 3113112221232112111312211312113211\nTerm 12 -- 1321132132111213122112311311222113111221131221\nTerm 13 -- 11131221131211131231121113112221121321132132211331222113112211\nTerm 14 -- 311311222113111231131112132112311321322112111312211312111322212311322113212221\nTerm 15 -- 132113213221133112132113311211131221121321131211132221123113112221131112311332111213211322211312113211\nTerm 16 -- 11131221131211132221232112111312212321123113112221121113122113111231133221121321132132211331121321231231121113122113322113111221131221\nTerm 17 -- 31131122211311123113321112131221123113112211121312211213211321322112311311222113311213212322211211131221131211132221232112111312111213111213211231131122212322211331222113112211\nTerm 18 -- 1321132132211331121321231231121113112221121321132122311211131122211211131221131211132221121321132132212321121113121112133221123113112221131112311332111213122112311311123112111331121113122112132113213211121332212311322113212221\nTerm 19 -- 11131221131211132221232112111312111213111213211231132132211211131221131211221321123113213221123113112221131112311332211211131221131211132211121312211231131112311211232221121321132132211331121321231231121113112221121321133112132112312321123113112221121113122113121113123112112322111213211322211312113211\nTerm 20 -- 311311222113111231133211121312211231131112311211133112111312211213211312111322211231131122211311122122111312211213211312111322211213211321322113311213212322211231131122211311123113223112111311222112132113311213211221121332211211131221131211132221232112111312111213111213211231132132211211131221232112111312211213111213122112132113213221123113112221131112311311121321122112132231121113122113322113111221131221\nTerm 21 -- 
132113213221133112132123123112111311222112132113311213211231232112311311222112111312211311123113322112132113213221133122112231131122211211131221131112311332211211131221131211132221232112111312111213322112132113213221133112132113221321123113213221121113122123211211131221222112112322211231131122211311123113321112131221123113111231121113311211131221121321131211132221123113112211121312211231131122211211133112111311222112111312211312111322211213211321322113311213211331121113122122211211132213211231131122212322211331222113112211\nTerm 22 -- 111312211312111322212321121113121112131112132112311321322112111312212321121113122112131112131221121321132132211231131122211331121321232221121113122113121113222123112221221321132132211231131122211331121321232221123113112221131112311332111213122112311311123112112322211211131221131211132221232112111312211322111312211213211312111322211231131122111213122112311311221132211221121332211213211321322113311213212312311211131122211213211331121321123123211231131122211211131221131112311332211213211321223112111311222112132113213221123123211231132132211231131122211311123113322112111312211312111322212321121113122123211231131122113221123113221113122112132113213211121332212311322113212221\nTerm 23 -- 
3113112221131112311332111213122112311311123112111331121113122112132113121113222112311311221112131221123113112221121113311211131122211211131221131211132221121321132132212321121113121112133221123113112221131112311332111213213211221113122113121113222112132113213221232112111312111213322112132113213221133112132123123112111311222112132113311213211221121332211231131122211311123113321112131221123113112221132231131122211211131221131112311332211213211321223112111311222112132113212221132221222112112322211211131221131211132221232112111312111213111213211231132132211211131221232112111312211213111213122112132113213221123113112221133112132123222112111312211312112213211231132132211211131221131211132221121311121312211213211312111322211213211321322113311213212322211231131122211311123113321112131221123113112211121312211213211321222113222112132113223113112221121113122113121113123112112322111213211322211312113211\nTerm 24 -- 132113213221133112132123123112111311222112132113311213211231232112311311222112111312211311123113322112132113212231121113112221121321132132211231232112311321322112311311222113111231133221121113122113121113221112131221123113111231121123222112132113213221133112132123123112111312111312212231131122211311123113322112111312211312111322111213122112311311123112112322211211131221131211132221232112111312111213111213211231132132211211131221232112111312212221121123222112132113213221133112132123123112111311222112132113213221132213211321322112311311222113311213212322211211131221131211221321123113213221121113122113121132211332113221122112133221123113112221131112311332111213122112311311123112111331121113122112132113121113222112311311221112131221123113112221121113311211131122211211131221131211132221121321132132212321121113121112133221123113112221131112212211131221121321131211132221123113112221131112311332211211133112111311222112111312211311123113322112111312211312111322212321121113121112133221121321132132211331121321231231121113112221121321132122311211131122211211131221131211322113322
112111312211322132113213221123113112221131112311311121321122112132231121113122113322113111221131221\nTerm 25 -- 1113122113121113222123211211131211121311121321123113213221121113122123211211131221121311121312211213211321322112311311222113311213212322211211131221131211221321123113213221121113122113121113222112131112131221121321131211132221121321132132211331121321232221123113112221131112311322311211131122211213211331121321122112133221121113122113121113222123211211131211121311121321123113111231131122112213211321322113311213212322211231131122211311123113223112111311222112132113311213211221121332211231131122211311123113321112131221123113111231121113311211131221121321131211132221123113112211121312211231131122113221122112133221121113122113121113222123211211131211121311121321123113213221121113122113121113222113221113122113121113222112132113213221232112111312111213322112311311222113111221221113122112132113121113222112311311222113111221132221231221132221222112112322211213211321322113311213212312311211131122211213211331121321123123211231131122211211131221131112311332211213211321223112111311222112132113213221123123211231132132211231131122211311123113322112111312211312111322111213122112311311123112112322211213211321322113312211223113112221121113122113111231133221121321132132211331121321232221123123211231132132211231131122211331121321232221123113112221131112311332111213122112311311123112112322211211131221131211132221232112111312111213111213211231132132211211131221131211221321123113213221123113112221131112211322212322211231131122211322111312211312111322211213211321322113311213211331121113122122211211132213211231131122212322211331222113112211\nTerm 26 -- 
31131122211311123113321112131221123113111231121113311211131221121321131211132221123113112211121312211231131122211211133112111311222112111312211312111322211213211321322123211211131211121332211231131122211311122122111312211213211312111322211231131122211311123113322112111331121113112221121113122113111231133221121113122113121113222123211211131211121332211213211321322113311213211322132112311321322112111312212321121113122122211211232221123113112221131112311332111213122112311311123112111331121113122112132113311213211321222122111312211312111322212321121113121112133221121321132132211331121321132213211231132132211211131221232112111312212221121123222112132113213221133112132123123112111311222112132113311213211231232112311311222112111312211311123113322112132113212231121113112221121321132122211322212221121123222112311311222113111231133211121312211231131112311211133112111312211213211312111322211231131122211311123113322113223113112221131112311332211211131221131211132211121312211231131112311211232221121321132132211331221122311311222112111312211311123113322112132113213221133122211332111213112221133211322112211213322112111312211312111322212321121113121112131112132112311321322112111312212321121113122112131112131221121321132132211231131122211331121321232221121113122113121122132112311321322112111312211312111322211213111213122112132113121113222112132113213221133112132123222112311311222113111231132231121113112221121321133112132112211213322112111312211312111322212311222122132113213221123113112221133112132123222112111312211312111322212321121113121112133221121311121312211213211312111322211213211321322123211211131211121332211213211321322113311213212312311211131122211213211331121321122112133221123113112221131112311332111213122112311311123112111331121113122112132113121113222112311311222113111221221113122112132113121113222112132113213221133122211332111213322112132113213221132231131122211311123113322112111312211312111322212321121113122123211231131122113221123113221113122112132113213211121332212311
322113212221\nTerm 27 -- 1321132132211331121321231231121113112221121321133112132112312321123113112221121113122113111231133221121321132122311211131122211213211321322112312321123113213221123113112221131112311332211211131221131211132211121312211231131112311211232221121321132132211331221122311311222112111312211311123113322112132113213221133112132123222112312321123113213221123113112221133112132123222112311311222113111231133211121312211231131112311211232221121113122113121113222123211211131221132211131221121321131211132221123113112211121312211231131122113221122112133221121321132132211331121321231231121113112221121321133112132112312321123113112221121113122123211211131221131211321122311311222113111231133211121312211231131112311211232221121113122113121113222123211211131221132211131221121321131211132221123113112211121312211231131122113221122112133221121113122113121113222123211211131211121311121321123113213221121113122123211211131221121311121312211213211321322112311311222113311213212322211211131221131211221321123113213221121113122113121132211332113221122112133221121321132132211331121321231231121113112221121321133112132112312321123113112221121113122113111231133221121321132132211331121321232221132213211321322113311213212322211231131122211311123113223112111311222112132113311213211221121332211211131221131211132221231122212213211321322112311311222113311213212322211211131221131211132221231132212312311211132132212312211322212221121123222112311311222113111231133211121312211231131112311211133112111312211213211312111322211231131122111213122112311311222112111331121113112221121113122113121113222112132113213221232112111312111213322112311311222113111221221113122112132113121113222112311311222113111231133221121113311211131122211211131221131112311332211211131221131211132221232112111312111213322112132113213221133112132113221321123113213221121113122123211211131221222112112322211231131122211311123113321112132132112211131221131211132221121321132132212321121113121112133221123113112221131112311332111
2131221123113111231121123222112111331121113112221121113122113111231133221121113122113121113221112131221123113111231121123222112111312211312111322212321121113121112131112132112311321322112111312212321121113122122211211232221121321132132211331121321231231121113112221121321133112132112312321123113112221121113122113111231133221121321132132211331221122311311222112111312211311123113322112111312211312111322212311322123123112112322211211131221131211132221132213211321322113311213212322211231131122211311123113321112131221123113112211121312211213211321222113222112132113223113112221121113122113121113123112112322111213211322211312113211\nTerm 28 -- 111312211312111322212321121113121112131112132112311321322112111312212321121113122112131112131221121321132132211231131122211331121321232221121113122113121122132112311321322112111312211312111322211213111213122112132113121113222112132113213221133112132123222112311311222113111231132231121113112221121321133112132112211213322112111312211312111322212311222122132113213221123113112221133112132123222112111312211312111322212321121113121112133221121311121312211213211312111322211213211321322123211211131211121332211213211321322113311213212312311211131122211213211331121321122112133221123113112221131112311332111213122112311311222113223113112221121113122113111231133221121321132122311211131122211213211321222113222122211211232221121113122113121113222123211211131211121311121321123113213221121113122123211211131221121311121312211213211321322112311311221112131221123113112221131112211312212213211321322113311213212312311211131122211213211331121321122112133221123113112221131112311332111213122112311311222113223113112221121113122113111231133221121321132122311211131122211213211321222113222122211211232221123113112221131112311332111213122112311311123112111331121113122112132113121113222112311311221112131221123113112221121113311211131122211211131221131211132221121321132132212321121113121112133221123113112221131112212211131221121321131211132221123113112221131112211322
21231221132221222112112322211211131221131211132221232112111312111213111213211231132132211211131221232112111312211213111213122112132113213221123113112221133112132123222112111312211312111322212321121113121112133221132211131221131211132221232112111312111213322112132113213221133112132113221321123113213221121113122123211211131221222112112322211231131122211311123113321112132132112211131221131211132221121321132132212321121113121112133221123113112221131112311332111213211322111213111213211231131211132211121311222113321132211221121332211213211321322113311213212312311211131122211213211331121321123123211231131122211211131221131112311332211213211321223112111311222112132113213221123123211231132132211231131122211311123113322112111312211312111322111213122112311311123112112322211213211321322113312211223113112221121113122113111231133221121321132132211331121321232221123123211231132132211231131122211331121321232221123113112221131112311332111213122112311311123112112322211211131221131211132221232112111312211322111312211213211312111322211231131122111213122112311311221132211221121332211213211321322113311213212312311211131211131221223113112221131112311332211211131221131211132211121312211231131112311211232221121321132132211331121321231231121113112221121321133112132112211213322112312321123113213221123113112221133112132123222112311311222113111231132231121113112221121321133112132112211213322112311311222113111231133211121312211231131112311211133112111312211213211312111322211231131122111213122112311311221132211221121332211211131221131211132221232112111312111213111213211231132132211211131221232112111312211213111213122112132113213221123113112221133112132123222112111312211312111322212311222122132113213221123113112221133112132123222112311311222113111231133211121321132211121311121321122112133221123113112221131112311332211322111312211312111322212321121113121112133221121321132132211331121321231231121113112221121321132122311211131122211211131221131211322113322112111312211322132113213221123113112221131112
311311121321122112132231121113122113322113111221131221\nTerm 29 -- 3113112221131112311332111213122112311311123112111331121113122112132113121113222112311311221112131221123113112221121113311211131122211211131221131211132221121321132132212321121113121112133221123113112221131112212211131221121321131211132221123113112221131112311332211211133112111311222112111312211311123113322112111312211312111322212321121113121112133221121321132132211331121321132213211231132132211211131221232112111312212221121123222112311311222113111231133211121321321122111312211312111322211213211321322123211211131211121332211231131122211311123113321112131221123113111231121123222112111331121113112221121113122113111231133221121113122113121113221112131221123113111231121123222112111312211312111322212321121113121112131112132112311321322112111312212321121113122122211211232221121321132132211331121321231231121113112221121321132132211322132113213221123113112221133112132123222112111312211312112213211231132132211211131221131211322113321132211221121332211231131122211311123113321112131221123113111231121113311211131221121321131211132221123113112211121312211231131122211211133112111311222112111312211312111322211213211321223112111311222112132113213221133122211311221122111312211312111322212321121113121112131112132112311321322112111312212321121113122122211211232221121321132132211331121321231231121113112221121321132132211322132113213221123113112221133112132123222112111312211312112213211231132132211211131221131211322113321132211221121332211213211321322113311213212312311211131122211213211331121321123123211231131122211211131221131112311332211213211321223112111311222112132113213221123123211231132132211231131122211311123113322112111312211312111322111213122112311311123112112322211213211321322113312211223113112221121113122113111231133221121321132132211331222113321112131122211332113221122112133221123113112221131112311332111213122112311311123112111331121113122112132113121113222112311311221112131221123113112221121113311211131122211
21113122113121113222112132113213221232112111312111213322112311311222113111231133211121312211231131112311211232221132231131122211311123113321112131221123113111231121123222112111312211312111322212321121113122113221113122112132113121113222112311311221112131221123113112211322112211213322112132113213221133112132123123112111312111312212231131122211311123113322112111312211312111322111213122112311311123112112322211213211321322113311213212312311211131221132231121113311211131221121321131112311322311211132132212312211322212221121123222112111312211312111322212321121113121112131112132112311321322112111312212321121113122112131112131221121321132132211231131122211331121321232221121113122113121122132112311321322112111312211312111322211213111213122112132113121113222112132113213221133112132123222112311311222113111231132231121113112221121321133112132112211213322112111312211312111322212311222122132113213221123113112221133112132123222112111312211312111322212321121113121112133221121311121312211213211312111322211213211321322123211211131211121332211213211321322113311213212312311211131122211213211331121321122112133221123113112221131112311332111213122112311311222113223113112221121113122113111231133221121321132122311211131122211213211321222113222122211211232221121113122113121113222123211211131211121311121321123113111231131122112213211321322113311213212322211231131122211311123113223112111311222112132113311213211221121332211211131221131211132221232112111312111213111213211231132132211211131221232112111312212221121123222112131112131221121321131211132221121321132132212321121113121112133221121321132132211331121321132213211231132132211211131221232112111312212221121123222112132113213221133112132123123112111311222112132113311213211231232112311311222112111312211311123113322112132113212231121113112221121321132122211322212221121123222112311311222113111231133211121312211231131112311211133112111312211213211312111322211231131122111213122112311311222112111331121113112221121113122113121113222112132113213221232
1121113121112133221123113112221131112311332111213213211221113122113121113222112132113213221232112111312111213322112132113213221133112132123123112111312211322311211133112111312212221121123222112132113213221133112132123222113223113112221131112311332111213122112311311123112112322211211131221131211132221232112111312111213111213211231132132211211131221131211221321123113213221123113112221131112211322212322211231131122211322111312211312111322211213211321322113311213211331121113122122211211132213211231131122212322211331222113112211\n"
]
],
[
[
"### Python challenge question 12",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/5808.html\" and it showed a blurry picture with page title 'odd even'. When I checked page source, there is nothing much except cave.jpg, which when clicked got to the same image. I tried searching '\"cave in python\" to see if I'll find a module named 'cave' but got nothing.\n\nI tried opening the image in \"paint\" and observed that the image some black pixels alternately. I had no idea where to go in this challenge and so googled regarding this challenge for some hints and got some. When I tried to check the pixels to confirm what I found when I opened the image on paint. When I used im.getpixels() to get the pixel values, the odd pixels have the image in, and the even pixels have some other image.\n\nI tried to blank the even pixels and odd pixels alternatively to check the changes in the image. The image changed when I blanked the odd pixels to get a new image with \"evil\" on it.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/5808.html\n\nimport urllib.request\nfrom PIL import Image\nfrom io import StringIO\n\n#url = 'http://www.pythonchallenge.com/pc/return/cave.jpg'\n#img_cav = urllib.request.urlopen(url).read()\n#img = Image.open(StringIO.StringIO(img_cav))\n\nim = Image.open('cave.jpg')\nim.size\nw, h = im.size\n#new = Image.new(\"RGB\", (w, h))\n\nprint(im.getpixel((0,0)))\nprint(im.getpixel((0,1)))\nprint(im.getpixel((1,0)))\nprint(im.getpixel((1,1)))\nprint(im.getpixel((1,2)))\nprint(im.getpixel((1,3)))\nprint(im.getpixel((1,4)))\nprint(im.getpixel((1,5)))\n\nfor i in range(w):\n for j in range(h):\n #if (i + j) % 2 == 0: # Blanked the even pixels\n if (i + j) % 2 == 1: # Blanked the odd pixels\n im.putpixel((i,j), 0)\n\nim\n\n#http://www.pythonchallenge.com/pc/return/evil.html",
"_____no_output_____"
]
],
[
[
"### Python challenge question 13",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/evil.html\" and it showed a picture of a man dealing with cards. When I checked page source, there is a link which redirected me to the URL \"http://www.pythonchallenge.com/pc/return/evil1.jpg\". When I changed the URL to \"http://www.pythonchallenge.com/pc/return/evil2.jpg\", it showed some other image that said \"not jpg - .gfx\". I further checked \"http://www.pythonchallenge.com/pc/return/evil3.jpg\", it showed an image saying \"no more evils\".\n\nWhen I changed the URL of \"evil2\" to .gfx, it downloaded a file with name \"evil2.gfx\". The image previously showed a hand dealing with 5 cards, so the byte code generated from .gfx file is split into 5 images (googled to get this clue). Then, I got 5 images which said \"dis\", \"pro\", \"port\", \"ional\" and \"ity\". I first tried \"disproportionality\" to get to the next challenge but didnot work. O examining, I observed that \"ity\" is striked out in the image and so used \"http://www.pythonchallenge.com/pc/return/disproportional.html\" for the next challenge.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/evil.html\n\nimport requests\nfrom PIL import Image\n\n#url_evl = \"http://www.pythonchallenge.com/pc/return/evil2.gfx\"\n#un, pw = 'huge', 'file'\n#d = requests.get(url_evl, auth=(un, pw)).content\n#print(d)\n\ndata = open(\"evil2.gfx\", \"rb\").read()\n#print(data)\n\nfor i in range(0,5):\n open('%d.png' %i ,'wb').write(data[i::5])\n\nim0 = Image.open('0.png')\nim0\n\nim1 = Image.open('1.png')\nim1 \n\nim2 = Image.open('2.png')\nim2 \n\nim3 = Image.open('3.png')\nim3 \n\nim4 = Image.open('4.png')\nim4 \n\n#http://www.pythonchallenge.com/pc/return/disproportional.html ",
"_____no_output_____"
]
],
[
[
"### Python challenge question 14",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/disproportional.html\" and it gave an image with numbers on phone and text \"phone that evil\". The number \"5\" is clickable and it took me to URL \"http://www.pythonchallenge.com/pc/phonebook.php\" which is XML file.\n\nOn checking page source, there is a text saying \"phone that <remote /> evil\". I googled \"remote module python\" and with some digging, I found about xmlrpc client. Also previously, we know that number \"5\" is clickable. With these two clues, I found that there is something that needs to be done in the previous challenge that has a link to this challenge.\n\nI tried to open evil4.jpg but nothing came. So, I read it to see if I get anything and I got \"'Bert is evil! go back!\". \n\nWith xmlrpc, I found the list of methods and with a phone picture and \"phonebook.php\" clues, I decided to use \"phone\" method.\nAnd with the clue obtained previously \"Bert\", I tried to use the name to get the number.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/disproportional.html\n\nurl_evl = \"http://www.pythonchallenge.com/pc/return/evil4.jpg\"\nun, pw = 'huge', 'file'\nd = requests.get(url_evl, auth=(un, pw)).content\nprint(d)\n\nimport xmlrpc.client\n\nurl_pb = 'http://www.pythonchallenge.com/pc/phonebook.php'\nwith xmlrpc.client.ServerProxy(url_pb) as proxy:\n print(proxy.system.listMethods())\n\nprint(proxy.system.methodHelp('phone'))\nprint(proxy.system.methodSignature('phone'))\nprint(proxy.phone('Bert'))\n\n#http://www.pythonchallenge.com/pc/return/italy.html",
"b'Bert is evil! go back!\\n'\n['phone', 'system.listMethods', 'system.methodHelp', 'system.methodSignature', 'system.multicall', 'system.getCapabilities']\nReturns the phone of a person\n[['string', 'string']]\n555-ITALY\n"
]
],
[
[
"### Python challenge question 15",
"_____no_output_____"
],
[
"For the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/italy.html\" and it gave an image of a roll in spiral form and other square image with vertical lines. The pagetitle is \"walk around\". When I checked the page source, it has a link to \"http://www.pythonchallenge.com/pc/return/wire.png\" which I saved. The title has (10000 by 1) and when I checked the \"wire.png\" on paint, it has dimension of 10000 by 1. When I zoomed the image, there is a line.\n\nAlso, there is a text \"remember: 100*100 = (100+99+99+98) + (... \"",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/italy.html\n\n\n\n#http://www.pythonchallenge.com/pc/return/uzi.html",
"_____no_output_____"
]
],
[
[
"### Python challenge question 16",
"_____no_output_____"
],
[
"For this challenge, I had to see the URL of the previous challenge. \n\nFor the next challenge, I tried URL \"http://www.pythonchallenge.com/pc/return/uzi.html\" and it gave an image with calendar with year 1_6 and January 26th rounded, which is a Monday. Also, when I checked the page source, there is a text \"todo: buy flowers for tomorrow\" which means it is an important day. Also, the text \"he ain't the youngest, he is the second\" means that he is second youngest.\n\nAlso, the calendar shows February hs 29 days, so, I figured that this is a leap year. The year might be anything between 1006 and 1996. With a little digging in Python, I found that there are a couple of modules useful for this challenge \"datetime\" and \"calendar\". So, I tried to get the year, but gave 5 years, i.e, 1176, 1356, 1576, 1756 and 1976.\n\nWith the clues mentioned previously, second youngest means 1756 and so tried to use that for the next challenge, but returned nothing. Then I got a hunch from other clue \"to buy flowers\" and so googled the year and date to find that it was Mozart's birthday after a couple of tries with \"Benjamin Franklin\" too.",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/uzi.html\n\nimport datetime\nimport calendar\n\nfor year in range(1006, 2000, 10):\n if calendar.isleap(year) and datetime.date(year, 1, 26).weekday() == 0:\n print(year)\n \n\n#http://www.pythonchallenge.com/pc/return/mozart.html",
"1176\n1356\n1576\n1756\n1976\n"
]
],
[
[
"#### Python challenge question 17",
"_____no_output_____"
]
],
[
[
"#http://www.pythonchallenge.com/pc/return/mozart.html\n\n",
"_____no_output_____"
]
],
[
[
"#### Python challenge question 18",
"_____no_output_____"
],
[
"#### Python challenge question 19",
"_____no_output_____"
],
[
"#### Python challenge question 20",
"_____no_output_____"
],
[
"#### Python challenge question 21",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e79a71c6230bb448f3171248b9b3ce52c883033a | 95,869 | ipynb | Jupyter Notebook | notebooks/1D_test.ipynb | nordam/Discontinuities | 940556a42174dfc648bef6ae9da0335aee229c82 | [
"MIT"
] | null | null | null | notebooks/1D_test.ipynb | nordam/Discontinuities | 940556a42174dfc648bef6ae9da0335aee229c82 | [
"MIT"
] | null | null | null | notebooks/1D_test.ipynb | nordam/Discontinuities | 940556a42174dfc648bef6ae9da0335aee229c82 | [
"MIT"
] | null | null | null | 264.101928 | 54,904 | 0.908521 | [
[
[
"# Import numpy and matplotlib, and use jupyter magic to\n# get plots directly in notebook\nimport numpy as np\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom numba import jit\nfrom tqdm import tqdm, trange\nimport sys",
"_____no_output_____"
]
],
[
[
"# Testing a 1D case",
"_____no_output_____"
]
],
[
[
"from scipy.interpolate import interp1d\nfrom scipy.optimize import bisect",
"_____no_output_____"
],
[
"# 4th-order Runge-Kutta\ndef rk4(x, t, h, f):\n # x is coordinates (as a vector)\n # h is timestep\n # f(x) is a function that returns the derivative\n # \"Slopes\"\n k1 = f(x, t)\n k2 = f(x + k1*h/2, t + h/2)\n k3 = f(x + k2*h/2, t + h/2)\n k4 = f(x + k3*h, t + h)\n # Update time and position\n x_ = x + h*(k1 + 2*k2 + 2*k3 + k4)/6\n return x_\n\ndef trajectory(X0, Tmax, h, f, integrator, progressbar = False):\n # Number of timesteps\n Nt = int((Tmax) / h)\n # Add 2 for initial position and fractional step at end\n # X0.size lets X hold Nt+2 arrays of the same size as X0\n X0 = np.array(X0)\n X = np.zeros((Nt+2, X0.size))\n T = np.zeros(Nt+2)\n # Initialise\n X[0,:] = X0\n T[0] = 0\n if progressbar:\n iterator = trange\n else:\n iterator = range\n # Loop over timesteps\n t = 0\n for i in iterator(Nt+1):\n # Make sure the last step stops exactly at Tmax\n h = min(h, Tmax - t)\n # Calculate next position\n X[i+1,:] = integrator(X[i,:], t, h, f)\n T[i+1] = T[i] + h\n # Increment time\n t += h\n return X, T\n\n# 4th-order Runge-Kutta\ndef rk4_dense(x, t, h, f):\n # x is coordinates (as a vector)\n # h is timestep\n # f(x) is a function that returns the derivative\n # \"Slopes\"\n k1 = f(x, t)\n k2 = f(x + k1*h/2, t + h/2)\n k3 = f(x + k2*h/2, t + h/2)\n k4 = f(x + k3*h, t + h)\n # Update time and position\n x_ = x + h*(k1 + 2*k2 + 2*k3 + k4)/6\n return x_, k1\n\ndef hermite(x0, k0, t0, x1, k1, t, h):\n # Calculate theta, a number in [0, 1] indicating position\n # within each interval\n theta = (t - t0) / h\n return (1-theta)*x0 + theta*x1 + theta*(theta-1)*((1-2*theta)*(x1-x0) + (theta-1)*h*k0 + theta*h*k1)\n\ndef trajectory_special(X0, Tmax, h0, f, integrator, discontinuities, progressbar = False):\n # Initialise\n X = [X0]\n T = [0.0]\n # keep track of the position relative to\n # the discontinuities.\n j = np.searchsorted(discontinuities, X0)\n # Loop over timesteps\n t = 0\n x = X0\n # iteration counter\n i = 0\n # Progress bar for long simulations\n if 
progressbar:\n pbar = tqdm(total = Tmax)\n while t < Tmax:\n # Make sure the last step stops exactly at Tmax\n h = min(h0, Tmax - t)\n # tentatively calculate next position\n x_, k = integrator(X[i], t, h, f)\n t_ = t + h\n # check for crossing of discontinuity\n j_ = np.searchsorted(discontinuities, x_)\n if j_ != j:\n # We have crossed one or more discontinuities,\n # find the time at which we crossed the first.\n if j_ > j:\n x_cross = discontinuities[j]\n else:\n x_cross = discontinuities[j-1]\n # if we are exactly at boundary, accept and move on\n if x_cross != x:\n # Get derivative at end of step\n # (k is already the derivative at the start of the step)\n k_ = f(x_, t_)\n # create hermite interpolator to use in bisection\n dense = lambda t_: hermite(x, k, t, x_, k_, t_, h) - x_cross\n # find time of crossing\n t_cross = bisect(dense, t, t + h)\n # Step to that time instead of the original time\n # (but never step across Tmax)\n h = min(t_cross - t, Tmax - t)\n x_, k = integrator(X[i], t, h, f)\n t_ = t + h\n # Update variables\n x = x_\n t = t_\n i += 1\n j = np.searchsorted(discontinuities, x)\n # Store progress\n X.append(x)\n T.append(t)\n if progressbar:\n # Update progress\n pbar.update(h)\n # Break to prevent infinite loop\n # (should never happen, but convenient in debugging)\n if i > 10*(Tmax/h0):\n print('Seems to get stuck in infinite loop')\n print('(or at least a very long loop)')\n print(X, T)\n break\n if progressbar:\n pbar.close()\n return X, T",
"_____no_output_____"
]
],
[
[
"## Run a quick test to verify that results don't look crazy",
"_____no_output_____"
]
],
[
[
"# Problem properties\nX0 = 50\nTmax = 10\ndt = 0.01\n\n# Interpolation points\nxc = np.linspace(0, 100, 1001)\n\n# kind of interpolation\n#kind = 'linear'\nkind = 'quadratic'\n#kind = 'cubic'\n\n\nfig = plt.figure(figsize = (9, 5))\n\n# Positive derivative\ninterpolator = interp1d(xc, 1.2 + np.sin(2*np.pi*xc), kind = kind)\nf = lambda x, t: interpolator(x)\nX_, T_ = trajectory_special(X0, Tmax, dt, f, rk4_dense, xc)\nX, T = trajectory(X0, Tmax, dt, f, rk4)\nplt.plot(T, X, label = 'RK4')\nplt.plot(T_, X_, '--', label = 'RK4 event detection')\n\n# Negative derivative\ninterpolator = interp1d(xc, -1.2 - np.sin(2*np.pi*xc), kind = kind)\nf = lambda x, t: interpolator(x)\nX_, T_ = trajectory_special(X0, Tmax, dt, f, rk4_dense, xc)\nX, T = trajectory(X0, Tmax, dt, f, rk4)\nplt.plot(T, X, label = 'RK4')\nplt.plot(T_, X_, '--', label = 'RK4 event detection')\n\nplt.xlabel('Time')\nplt.ylabel('X')\nplt.legend()\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"## Run convergence test",
"_____no_output_____"
]
],
[
[
"X0 = 0\nTmax = 10\n\n# Interopolation points\nxc = np.linspace(0, 100, 1001)\n\n# kind of interpolation\nkind = 'linear'\n#kind = 'quadratic'\n#kind = 'cubic'\n\n# create interpolator, and wrap with lambda to get f(x, t)\ninterpolator = interp1d(xc, 2 + np.sin(2*np.pi*xc), kind = kind)\nf = lambda x, t: interpolator(x)\n\n\n# Reference solution\n# (calculating the reference solution with the special integrator\n# was found to work better)\ndt_ref = 0.0002\nX_ref_, T_ref = trajectory_special(X0, Tmax, dt_ref, f, rk4_dense, xc, progressbar = True)\n\n\n# List of timesteps to investigate\ndt_list = np.logspace(-3, -1, 100)\n\n# Arrays to keep track of errors\nerrors = np.zeros(len(dt_list))\nerrors_special = np.zeros(len(dt_list))\n\n# Loop over timesteps and calculate error\nfor i, dt in tqdm(enumerate(dt_list), total = len(dt_list)):\n X, T = trajectory(X0, Tmax, dt, f, rk4)\n errors[i] = np.abs(X_ref_[-1] - X[-1])\n X_, T_ = trajectory_special(X0, Tmax, dt, f, rk4_dense, xc)\n errors_special[i] = np.abs(X_ref_[-1] - X_[-1])",
"100%|██████████| 10.0/10 [00:05<00:00, 1.98it/s] \n100%|██████████| 100/100 [00:48<00:00, 2.07it/s]\n"
],
[
"fig = plt.figure(figsize = (7, 4))\n\n# Plot errors\nplt.plot(dt_list, errors, label = 'RK4')\nplt.plot(dt_list, errors_special, label = 'RK4 event detection')\n\n# Plot trendlines\nplt.plot(dt_list, 1e-1*dt_list**2, '--', c = 'k', label = '$h^2$')\nplt.plot(dt_list, 1e-0*dt_list**3, '-.', c = 'k', label = '$h^3$')\nplt.plot(dt_list, 1e+1*dt_list**4, ':', c = 'k', label = '$h^4$')\n\n# scales and labels, etc.\nplt.xscale('log')\nplt.yscale('log')\nplt.ylabel('Global error')\nplt.xlabel('Timestep, $h$')\nplt.legend(fontsize = 12, loc = 'lower right')\nplt.tight_layout()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79a74192708558316c7fe2793597a12050007a3 | 45,371 | ipynb | Jupyter Notebook | ssd512_training _2.ipynb | hidekazu300/ssd_512_2 | 3d0ea548cda7e8e0cd2ca31973b531bbebcfc250 | [
"Apache-2.0"
] | null | null | null | ssd512_training _2.ipynb | hidekazu300/ssd_512_2 | 3d0ea548cda7e8e0cd2ca31973b531bbebcfc250 | [
"Apache-2.0"
] | null | null | null | ssd512_training _2.ipynb | hidekazu300/ssd_512_2 | 3d0ea548cda7e8e0cd2ca31973b531bbebcfc250 | [
"Apache-2.0"
] | null | null | null | 64.815714 | 2,314 | 0.62994 | [
[
[
"# SSD512 Training\n\n正しく学習できたらこんな感じ\n[SSD300 \"07+12\" training summary](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md)",
"_____no_output_____"
]
],
[
[
"from tensorflow.python.keras.optimizers import Adam, SGD\nfrom tensorflow.python.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd512 import ssd_512\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\nfrom keras_layers.keras_layer_L2Normalization import L2Normalization\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_geometric_ops import Resize\nfrom data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n\nfrom make_annotation import Make_PicXML\nfrom make_annotation import Make_txt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"home_path = 'C:/tensorflow1/ssd_512_2/'",
"_____no_output_____"
],
[
"img_height = 512 # Height of the model input images\nimg_width = 512 # Width of the model input images\nimg_channels = 3 # Number of color channels of the model input images\nmean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\nswap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\nn_classes = 34 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\nscales = [0.07, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\n\naspect_ratios = [[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]] \ntwo_boxes_for_ar1 = True\nsteps = [8, 16, 32, 64, 128, 256, 512] # The space between two adjacent anchor box center points for each predictor layer.\noffsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\nnormalize_coords = True",
"_____no_output_____"
],
[
"# 1: Build the Keras model.\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = ssd_512(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_per_layer= aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=mean_color,\n swap_channels=swap_channels)\n\n# 2: Load some weights into the model.\n\nweights_path = home_path + 'VGG_ILSVRC_16_layers_fc_reduced.h5'\n\nmodel.load_weights(weights_path, by_name=True)\n\n# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n# Adam optimizer がおすすめ.\n\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n#sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=adam, loss=ssd_loss.compute_loss)\n",
"_____no_output_____"
]
],
[
[
"## 0. Make annotation data",
"_____no_output_____"
]
],
[
[
"datasize = 10\n\nMake_PicXML(sample_filename = 'sample/home' ,\n save_pic_filename = 'DATASET/JPEGImages',\n save_xml_filename = 'DATASET/Annotations',\n robust = 0 ,\n datasize = datasize )\n\nMake_txt( save_file = 'DATASET', datasize = datasize , percent = 0.2 )",
"_____no_output_____"
]
],
[
[
"## 1. Set the model configuration parameters\n\nパラメーターを設定する。",
"_____no_output_____"
],
[
"## 2. Build or load the model\n\n初めてであれば2.1を、2回目の学習以降は2.2を実行。両方はだめ",
"_____no_output_____"
],
[
"### 2.1 Create a new model and load trained VGG-16 weights into it (or trained SSD weights)\n\nIf you want to create a new SSD300 model, this is the relevant section for you. If you want to load a previously saved SSD300 model, skip ahead to section 2.2.\n\nThe code cell below does the following things:\n1. It calls the function `ssd_300()` to build the model.\n2. It then loads the weights file that is found at `weights_path` into the model. You could load the trained VGG-16 weights or you could load the weights of a trained model. If you want to reproduce the original SSD training, load the pre-trained VGG-16 weights. In any case, you need to set the path to the weights file you want to load on your local machine. Download links to all the trained weights are provided in the [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository.\n3. Finally, it compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n\nNormally, the optimizer of choice would be Adam (commented out below), but since the original implementation uses plain SGD with momentum, we'll do the same in order to reproduce the original training. Adam is generally the superior optimizer, so if your goal is not to have everything exactly as in the original training, feel free to switch to Adam. You might need to adjust the learning rate scheduler below slightly in case you use Adam.\n\nNote that the learning rate that is being set here doesn't matter, because further below we'll pass a learning rate scheduler to the training function, which will overwrite any learning rate set here, i.e. what matters are the learning rates that are defined by the learning rate scheduler.\n\n`SSDLoss` is a custom Keras loss function that implements the multi-task that consists of a log loss for classification and a smooth L1 loss for localization. 
`neg_pos_ratio` and `alpha` are set as in the paper.",
"_____no_output_____"
],
[
"### 2.2 Load a previously created model\n\nIf you have previously created and saved a model and would now like to load it, execute the next code cell. The only thing you need to do here is to set the path to the saved model HDF5 file that you would like to load.\n\nThe SSD model contains custom objects: Neither the loss function nor the anchor box or L2-normalization layer types are contained in the Keras core library, so we need to provide them to the model loader.\n\nThis next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.",
"_____no_output_____"
]
],
[
[
"\"\"\"\n# TODO: Set the path to the `.h5` file of the model to be loaded.\nmodel_path = 'path/to/trained/model.h5'\n\n# We need to create an SSDLoss object in order to pass that to the model loader.\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'L2Normalization': L2Normalization,\n 'compute_loss': ssd_loss.compute_loss})\n\"\"\"",
"_____no_output_____"
]
],
[
[
"## 3. Set up the data generators for the training\n\nThe code cells below set up the data generators for the training and validation datasets to train the model. The settings below reproduce the original SSD training on Pascal VOC 2007 `trainval` plus 2012 `trainval` and validation on Pascal VOC 2007 `test`.\n\nThe only thing you need to change here are the filepaths to the datasets on your local machine. Note that parsing the labels from the XML annotations files can take a while.\n\nNote that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images (around 9 GB total for Pascal VOC 2007 `trainval` plus 2012 `trainval` and another 2.6 GB for 2007 `test`). You can later load these HDF5 datasets directly in the constructor.\n\nThe original SSD implementation uses a batch size of 32 for the training. 
In case you run into GPU memory issues, reduce the batch size accordingly. You need at least 7 GB of free GPU memory to train an SSD300 with 20 object classes with a batch size of 32.\n\nThe `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n\nThe data augmentation settings defined further down reproduce the data augmentation pipeline of the original SSD training. The training generator receives an object `ssd_data_augmentation`, which is a transformation object that is itself composed of a whole chain of transformations that replicate the data augmentation procedure used to train the original Caffe implementation. The validation generator receives an object `resize`, which simply resizes the input images.\n\nAn `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.\n\nIn order to train the model on a dataset other than Pascal VOC, either choose `DataGenerator`'s appropriate parser method that corresponds to your data format, or, if `DataGenerator` does not provide a suitable parser for your data format, you can write an additional parser and add it. 
Out of the box, `DataGenerator` can handle datasets that use the Pascal VOC format (use `parse_xml()`), the MS COCO format (use `parse_json()`) and a wide range of CSV formats (use `parse_csv()`).",
"_____no_output_____"
]
],
[
[
"# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets. This can take a while.\n\n# The directories that contain the images.\nimages_dir = home_path + 'DATASET/JPEGImages/'\n\n# The directories that contain the annotations.\nannotations_dir = home_path + 'DATASET/Annotations/'\n\n# The paths to the image sets.\nval_image_set_filename = home_path + 'DATASET/val.txt'\ntrainval_image_set_filename = home_path + 'DATASET/trainval.txt'\n\n# The XML parser needs to now what object class names to look for and in which order to map them to integers.\nclasses = ['1m','2m','3m','4m','5m','6m','7m','8m','9m','1p','2p','3p','4p','5p','6p',\n '7p','8p','9p','1s','2s','3s','4s','5s','6s','7s','8s','9s',\n 'east','south','west','north','white','hatsu','tyun']\n\ntrain_dataset.parse_xml(images_dirs=[images_dir],\n image_set_filenames=[trainval_image_set_filename],\n annotations_dirs=[annotations_dir],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False)\n\nval_dataset.parse_xml(images_dirs=[images_dir],\n image_set_filenames=[val_image_set_filename],\n annotations_dirs=[annotations_dir],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=True,\n ret=False)\n\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. 
If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\ntrain_dataset.create_hdf5_dataset(file_path='DATASET_trainval.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)\n\nval_dataset.create_hdf5_dataset(file_path='DATASET_test.h5',\n resize=False,\n variable_image_size=True,\n verbose=True)",
"_____no_output_____"
],
[
"# 3: Set the batch size.\n\nbatch_size = 2 # Change the batch size if you like, or if you run into GPU memory issues.\n\n# 4: Set the image transformations for pre-processing and data augmentation options.\n\n# For the training generator:\nssd_data_augmentation = SSDDataAugmentation(img_height=img_height,\n img_width=img_width,\n background=mean_color)\n\n# For the validation generator:\nconvert_to_3_channels = ConvertTo3Channels()\nresize = Resize(height=img_height, width=img_width)\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n model.get_layer('fc7_mbox_conf').output_shape[1:3],\n model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv9_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.5,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[ssd_data_augmentation],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n 
shuffle=False,\n transformations=[convert_to_3_channels,\n resize],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))",
"_____no_output_____"
]
],
[
[
"## 4. Set the remaining training parameters\n\nWe've already chosen an optimizer and set the batch size above, now let's set the remaining training parameters. I'll set one epoch to consist of 1,000 training steps. The next code cell defines a learning rate schedule that replicates the learning rate schedule of the original Caffe implementation for the training of the SSD300 Pascal VOC \"07+12\" model. That model was trained for 120,000 steps with a learning rate of 0.001 for the first 80,000 steps, 0.0001 for the next 20,000 steps, and 0.00001 for the last 20,000 steps. If you're training on a different dataset, define the learning rate schedule however you see fit.\n\nI'll set only a few essential Keras callbacks below, feel free to add more callbacks if you want TensorBoard summaries or whatever. We obviously need the learning rate scheduler and we want to save the best models during the training. It also makes sense to continuously stream our training history to a CSV log file after every epoch, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Finally, we'll also add a callback that makes sure that the training terminates if the loss becomes `NaN`. Depending on the optimizer you use, it can happen that the loss becomes `NaN` during the first iterations of the training. In later iterations it's less of a risk. For example, I've never seen a `NaN` loss when I trained SSD using an Adam optimizer, but I've seen a `NaN` loss a couple of times during the very first couple of hundred training steps of training a new model when I used an SGD optimizer.",
"_____no_output_____"
]
],
[
[
"# Define a learning rate schedule.\n\ndef lr_schedule(epoch):\n if epoch < 80:\n return 0.001\n elif epoch < 100:\n return 0.0001\n else:\n return 0.00001",
"_____no_output_____"
],
[
"# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the model.\nmodel_checkpoint = ModelCheckpoint(filepath='trained_model/ssd512_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n#model_checkpoint.best = \n\ncsv_logger = CSVLogger(filename='trained_model/ssd512_training_log.csv',\n separator=',',\n append=True)\n\nlearning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n verbose=1)\n\nterminate_on_nan = TerminateOnNaN()\n\ncallbacks = [model_checkpoint,\n csv_logger,\n learning_rate_scheduler,\n terminate_on_nan]",
"_____no_output_____"
]
],
[
[
"## 5. Train",
"_____no_output_____"
],
[
"In order to reproduce the training of the \"07+12\" model mentioned above, at 1,000 training steps per epoch you'd have to train for 120 epochs. That is going to take really long though, so you might not want to do all 120 epochs in one go and instead train only for a few epochs at a time. You can find a summary of a full training [here](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md).\n\nIn order to only run a partial training and resume smoothly later on, there are a few things you should note:\n1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n2. In order for the learning rate scheduler callback above to work properly, `fit_generator()` needs to know which epoch we're in, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n3. 
In order for the model checkpoint callback above to work correctly after a kernel restart, set `model_checkpoint.best` to the best validation loss from the previous training. If you don't do this and a new `ModelCheckpoint` object is created after a kernel restart, that object obviously won't know what the last best validation loss was, so it will always save the weights of the first epoch of your new training and record that loss as its new best loss. This isn't super-important, I just wanted to mention it.",
"_____no_output_____"
]
],
[
[
"# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 120\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)",
"_____no_output_____"
]
],
[
[
"## 6. Make predictions\n\nNow let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator that we've already set up above. Feel free to change the batch size.\n\nYou can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.",
"_____no_output_____"
]
],
[
[
"# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=True,\n transformations=[convert_to_3_channels,\n resize],\n label_encoder=None,\n returns={'processed_images',\n 'filenames',\n 'inverse_transform',\n 'original_images',\n 'original_labels'},\n keep_images_without_gt=False)",
"_____no_output_____"
],
[
"# 2: Generate samples.\n\nbatch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(np.array(batch_original_labels[i]))",
"_____no_output_____"
],
[
"# 3: Make predictions.\n\ny_pred = model.predict(batch_images)",
"_____no_output_____"
]
],
[
[
"Now let's decode the raw predictions in `y_pred`.\n\nHad we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU).\n\n`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.",
"_____no_output_____"
]
],
[
[
"# 4: Decode the raw predictions in `y_pred`.\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.4,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)",
"_____no_output_____"
]
],
[
[
"We made the predictions on the resized images, but we'd like to visualize the outcome on the original input images, so we'll convert the coordinates accordingly. Don't worry about that opaque `apply_inverse_transforms()` function below, in this simple case it just aplies `(* original_image_size / resized_image_size)` to the box coordinates.",
"_____no_output_____"
]
],
[
[
"# 5: Convert the predictions for the original image.\n\ny_pred_decoded_inv = apply_inverse_transforms(y_pred_decoded, batch_inverse_transforms)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded_inv[i])",
"_____no_output_____"
]
],
[
[
"Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison.",
"_____no_output_____"
]
],
[
[
"# 5: Draw the predicted boxes onto the image\n\n# Set the colors for the bounding boxes\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()\nclasses = ['1m','2m','3m','4m','5m','6m','7m','8m','9m','1p','2p','3p','4p','5p','6p',\n '7p','8p','9p','1s','2s','3s','4s','5s','6s','7s','8s','9s',\n 'east','south','west','north','white','hatsu','tyun']\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_original_images[i])\n\ncurrent_axis = plt.gca()\n\nfor box in batch_original_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\nfor box in y_pred_decoded_inv[i]:\n xmin = box[2]\n ymin = box[3]\n xmax = box[4]\n ymax = box[5]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79a7b30efc600e4d18351117fb608da78999277 | 27,256 | ipynb | Jupyter Notebook | step-functions-data-science-sdk/machine_learning_workflow_abalone/machine_learning_workflow_abalone.ipynb | juliensimon/amazon-sagemaker-examples | 640720ef2278fea740435691d1294a5f09cf139f | [
"Apache-2.0"
] | 4 | 2020-03-26T14:26:13.000Z | 2021-06-29T11:14:58.000Z | step-functions-data-science-sdk/machine_learning_workflow_abalone/machine_learning_workflow_abalone.ipynb | GuillaumeSimo/amazon-sagemaker-examples | 640720ef2278fea740435691d1294a5f09cf139f | [
"Apache-2.0"
] | null | null | null | step-functions-data-science-sdk/machine_learning_workflow_abalone/machine_learning_workflow_abalone.ipynb | GuillaumeSimo/amazon-sagemaker-examples | 640720ef2278fea740435691d1294a5f09cf139f | [
"Apache-2.0"
] | 8 | 2020-12-14T15:49:24.000Z | 2022-03-23T18:38:36.000Z | 37.28591 | 684 | 0.60871 | [
[
[
"# Build a machine learning workflow using Step Functions and SageMaker\n\n1. [Introduction](#Introduction)\n1. [Setup](#Setup)\n1. [Build a machine learning workflow](#Build-a-machine-learning-workflow)\n",
"_____no_output_____"
],
[
"## Introduction\n\nThis notebook describes using the AWS Step Functions Data Science SDK to create and manage workflows. The Step Functions SDK is an open source library that allows data scientists to easily create and execute machine learning workflows using AWS Step Functions and Amazon SageMaker. For more information, see the following.\n* [AWS Step Functions](https://aws.amazon.com/step-functions/)\n* [AWS Step Functions Developer Guide](https://docs.aws.amazon.com/step-functions/latest/dg/welcome.html)\n* [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io)\n\nIn this notebook we will use the SDK to create steps, link them together to create a workflow, and execute the workflow in AWS Step Functions. The first tutorial shows how to create an ML pipeline workflow, and the second shows how to run multiple experiments in parallel.",
"_____no_output_____"
]
],
[
[
"%%sh\npip -q install --upgrade stepfunctions",
"_____no_output_____"
]
],
[
[
"## Setup\n\n### Add a policy to your SageMaker role in IAM\n\n**If you are running this notebook on an Amazon SageMaker notebook instance**, the IAM role assumed by your notebook instance needs permission to create and run workflows in AWS Step Functions. To provide this permission to the role, do the following.\n\n1. Open the Amazon [SageMaker console](https://console.aws.amazon.com/sagemaker/). \n2. Select **Notebook instances** and choose the name of your notebook instance\n3. Under **Permissions and encryption** select the role ARN to view the role on the IAM console\n4. Choose **Attach policies** and search for `AWSStepFunctionsFullAccess`.\n5. Select the check box next to `AWSStepFunctionsFullAccess` and choose **Attach policy**\n\nIf you are running this notebook in a local environment, the SDK will use your configured AWS CLI configuration. For more information, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html).\n\nNext, create an execution role in IAM for Step Functions. \n\n### Create an execution role for Step Functions\n\nYou need an execution role so that you can create and execute workflows in Step Functions.\n\n1. Go to the [IAM console](https://console.aws.amazon.com/iam/)\n2. Select **Roles** and then **Create role**.\n3. Under **Choose the service that will use this role** select **Step Functions**\n4. Choose **Next** until you can enter a **Role name**\n5. Enter a name such as `StepFunctionsWorkflowExecutionRole` and then select **Create role**\n\n\nAttach a policy to the role you created. The following steps attach a policy that provides full access to Step Functions, however as a good practice you should only provide access to the resources you need. \n\n1. Under the **Permissions** tab, click **Add inline policy**\n2. 
Enter the following in the **JSON** tab\n\n```json\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"sagemaker:CreateTransformJob\",\n \"sagemaker:DescribeTransformJob\",\n \"sagemaker:StopTransformJob\",\n \"sagemaker:CreateTrainingJob\",\n \"sagemaker:DescribeTrainingJob\",\n \"sagemaker:StopTrainingJob\",\n \"sagemaker:CreateHyperParameterTuningJob\",\n \"sagemaker:DescribeHyperParameterTuningJob\",\n \"sagemaker:StopHyperParameterTuningJob\",\n \"sagemaker:CreateModel\",\n \"sagemaker:CreateEndpointConfig\",\n \"sagemaker:CreateEndpoint\",\n \"sagemaker:DeleteEndpointConfig\",\n \"sagemaker:DeleteEndpoint\",\n \"sagemaker:UpdateEndpoint\",\n \"sagemaker:ListTags\",\n \"lambda:InvokeFunction\",\n \"sqs:SendMessage\",\n \"sns:Publish\",\n \"ecs:RunTask\",\n \"ecs:StopTask\",\n \"ecs:DescribeTasks\",\n \"dynamodb:GetItem\",\n \"dynamodb:PutItem\",\n \"dynamodb:UpdateItem\",\n \"dynamodb:DeleteItem\",\n \"batch:SubmitJob\",\n \"batch:DescribeJobs\",\n \"batch:TerminateJob\",\n \"glue:StartJobRun\",\n \"glue:GetJobRun\",\n \"glue:GetJobRuns\",\n \"glue:BatchStopJobRun\"\n ],\n \"Resource\": \"*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"iam:PassRole\"\n ],\n \"Resource\": \"*\",\n \"Condition\": {\n \"StringEquals\": {\n \"iam:PassedToService\": \"sagemaker.amazonaws.com\"\n }\n }\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"events:PutTargets\",\n \"events:PutRule\",\n \"events:DescribeRule\"\n ],\n \"Resource\": [\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule\",\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule\",\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule\",\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule\",\n \"arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule\"\n ]\n }\n ]\n}\n```\n\n3. 
Choose **Review policy** and give the policy a name such as `StepFunctionsWorkflowExecutionPolicy`\n4. Choose **Create policy**. You will be redirected to the details page for the role.\n5. Copy the **Role ARN** at the top of the **Summary**",
"_____no_output_____"
],
[
"### Configure execution roles",
"_____no_output_____"
]
],
[
[
"import sagemaker\n\n# SageMaker Execution Role\n# You can use sagemaker.get_execution_role() if running inside sagemaker's notebook instance\nsagemaker_execution_role = sagemaker.get_execution_role() #Replace with ARN if not in an AWS SageMaker notebook\n\n# paste the StepFunctionsWorkflowExecutionRole ARN from above\nworkflow_execution_role = 'arn:aws:iam::ACCOUNT_NUMBER:role/StepFunctionsWorkflowExecutionRole'",
"_____no_output_____"
]
],
[
[
"### Import the required modules",
"_____no_output_____"
]
],
[
[
"import boto3\nimport sagemaker\nimport time\nimport random\nimport uuid\nimport logging\nimport stepfunctions\nimport io\nimport random\nimport os\n\nfrom sagemaker.amazon.amazon_estimator import get_image_uri\nfrom stepfunctions import steps\nfrom stepfunctions.steps import TrainingStep, ModelStep, TransformStep\nfrom stepfunctions.inputs import ExecutionInput\nfrom stepfunctions.workflow import Workflow\nfrom stepfunctions.template import TrainingPipeline\nfrom stepfunctions.template.utils import replace_parameters_with_jsonpath\n\nsession = sagemaker.Session()\nstepfunctions.set_stream_logger(level=logging.INFO)\n\nregion = boto3.Session().region_name\nbucket = session.default_bucket()\nprefix = 'sagemaker/DEMO-xgboost-regression'\nbucket_path = 's3://{}/{}/'.format(bucket, prefix)",
"_____no_output_____"
]
],
[
[
"### Prepare the dataset",
"_____no_output_____"
],
[
"This notebook uses the XGBoost algorithm to train and host a regression model. We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html) originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In the libsvm converted [version](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), the nominal feature (Male/Female/Infant) has been converted into a real valued feature. Age of abalone is to be predicted from eight physical measurements. ",
"_____no_output_____"
]
],
[
[
"try: #python3\n from urllib.request import urlretrieve\nexcept: #python2\n from urllib import urlretrieve\n\n# Load the dataset\nFILE_DATA = 'abalone'\nurlretrieve(\"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone\", FILE_DATA)",
"_____no_output_____"
],
[
"import numpy as np\nfrom sklearn.datasets import load_svmlight_file, dump_svmlight_file\n\ndata = load_svmlight_file(FILE_DATA)\n\n# Split the downloaded data into train/test/validation files\nPERCENT_TRAIN = 70\nPERCENT_VALIDATION = 15\ntrain_data_x, validation_data_x, test_data_x = np.split(data[0].toarray(), \n [int(PERCENT_TRAIN * len(data)), \n int((PERCENT_TRAIN+PERCENT_VALIDATION)*len(data))])\ntrain_data_y, validation_data_y, test_data_y = np.split(data[1], \n [int(PERCENT_TRAIN * len(data)), \n int((PERCENT_TRAIN+PERCENT_VALIDATION)*len(data))])\n\n# Save the files\nFILE_TRAIN = 'abalone.train'\nFILE_VALIDATION = 'abalone.validation'\nFILE_TEST = 'abalone.test'\n\ndump_svmlight_file(train_data_x, train_data_y, FILE_TRAIN)\ndump_svmlight_file(validation_data_x, validation_data_y, FILE_VALIDATION)\ndump_svmlight_file(test_data_x, test_data_y, FILE_TEST)",
"_____no_output_____"
],
[
"# S3 files\ntrain_s3_file = os.path.join(prefix, 'train', FILE_TRAIN)\nvalidation_s3_file = os.path.join(prefix, 'train', FILE_VALIDATION)\ntest_s3_file = os.path.join(prefix, 'train', FILE_TEST)\n\n# Upload the three files to Amazon S3\ns3_client = boto3.client('s3')\ns3_client.upload_file(FILE_TRAIN, bucket, train_s3_file)\ns3_client.upload_file(FILE_VALIDATION, bucket, validation_s3_file)\ns3_client.upload_file(FILE_TEST, bucket, test_s3_file)\n\n# S3 URIs\ntrain_s3_file = 's3://{}/{}'.format(bucket, train_s3_file)\nvalidation_s3_file = 's3://{}/{}'.format(bucket, validation_s3_file)\ntest_s3_file = 's3://{}/{}'.format(bucket, test_s3_file)\noutput_s3 = 's3://{}/{}/{}/'.format(bucket, prefix, 'output')",
"_____no_output_____"
]
],
[
[
"### Configure the AWS Sagemaker estimator",
"_____no_output_____"
]
],
[
[
"xgb = sagemaker.estimator.Estimator(\n get_image_uri(region, 'xgboost', repo_version='0.90-2'),\n sagemaker_execution_role, \n train_instance_count = 1, \n train_instance_type = 'ml.m4.4xlarge',\n output_path = output_s3,\n sagemaker_session = session\n)\n\nxgb.set_hyperparameters(\n objective = 'reg:linear',\n num_round = 50,\n max_depth = 5,\n eta = 0.2,\n gamma = 4,\n min_child_weight = 6,\n subsample = 0.7,\n silent = 0\n)",
"_____no_output_____"
]
],
[
[
"\n## Build a machine learning workflow",
"_____no_output_____"
],
[
"<img src=\"img/e2e_pipeline.png\">",
"_____no_output_____"
],
[
"You can use a workflow to create a machine learning pipeline. The AWS Data Science Workflows SDK provides several AWS SageMaker workflow steps that you can use to construct an ML pipeline. In this tutorial you will use the Train and Transform steps.\n\n* [**TrainingStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) - Starts a Sagemaker training job and outputs the model artifacts to S3.\n* [**ModelStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) - Creates a model on SageMaker using the model artifacts from S3.\n* [**TransformStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TransformStep) - Starts a SageMaker transform job\n* [**EndpointConfigStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) - Defines an endpoint configuration on SageMaker.\n* [**EndpointStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) - Deploys the trained model to the configured endpoint.",
"_____no_output_____"
],
[
"### Define the input schema for a workflow execution\n\nThe [**ExecutionInput**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/placeholders.html#stepfunctions.inputs.ExecutionInput) API defines the options to dynamically pass information to a workflow at runtime.\n\nThe following cell defines the fields that must be passed to your workflow when starting an execution.\n\nWhile the workflow is usually static after it is defined, you may want to pass values dynamically that are used by steps in your workflow. To help with this, the SDK provides a way to create placeholders when you define your workflow. These placeholders can be dynamically assigned values when you execute your workflow.\n\nExecutionInput values are accessible to each step of your workflow. You have the ability to define a schema for this placeholder collection, as shown in the cell below. When you execute your workflow the SDK will verify if the dynamic input conforms to the schema you defined.",
"_____no_output_____"
]
],
[
[
"# SageMaker expects unique names for each job, model and endpoint. \n# If these names are not unique the execution will fail. Pass these\n# dynamically for each execution using placeholders.\nexecution_input = ExecutionInput(schema={\n 'JobName': str, \n 'ModelName': str,\n 'EndpointName': str\n})",
"_____no_output_____"
]
],
[
[
"### Create the training step \n\nIn the following cell we create the training step and pass the estimator we defined above. See [TrainingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) in the AWS Step Functions Data Science SDK documentation.",
"_____no_output_____"
]
],
[
[
"training_step = steps.TrainingStep(\n 'Train Step', \n estimator=xgb,\n data={\n 'train': sagemaker.s3_input(train_s3_file, content_type='libsvm'),\n 'validation': sagemaker.s3_input(validation_s3_file, content_type='libsvm')\n },\n job_name=execution_input['JobName']\n)",
"_____no_output_____"
]
],
[
[
"### Create the model step \n\nIn the following cell we define a model step that will create a model in SageMaker using the artifacts created during the TrainingStep. See [ModelStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) in the AWS Step Functions Data Science SDK documentation.\n\nThe model creation step typically follows the training step. The Step Functions SDK provides the [get_expected_model](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep.get_expected_model) method in the TrainingStep class to provide a reference for the trained model artifacts. Please note that this method is only useful when the ModelStep directly follows the TrainingStep.",
"_____no_output_____"
]
],
[
[
"model_step = steps.ModelStep(\n 'Save model',\n model=training_step.get_expected_model(),\n model_name=execution_input['ModelName'] \n)",
"_____no_output_____"
]
],
[
[
"### Create the transform step\n\nIn the following cell we create the transform step. See [TransformStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TransformStep) in the AWS Step Functions Data Science SDK documentation.",
"_____no_output_____"
]
],
[
[
"transform_step = steps.TransformStep(\n 'Transform Input Dataset',\n transformer=xgb.transformer(\n instance_count=1,\n instance_type='ml.m5.large'\n ),\n job_name=execution_input['JobName'], \n model_name=execution_input['ModelName'], \n data=test_s3_file,\n content_type='text/libsvm'\n)",
"_____no_output_____"
]
],
[
[
"### Create an endpoint configuration step\n\nIn the following cell we create an endpoint configuration step. See [EndpointConfigStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) in the AWS Step Functions Data Science SDK documentation.\n\n",
"_____no_output_____"
]
],
[
[
"endpoint_config_step = steps.EndpointConfigStep(\n \"Create Endpoint Config\",\n endpoint_config_name=execution_input['ModelName'],\n model_name=execution_input['ModelName'],\n initial_instance_count=1,\n instance_type='ml.m5.large'\n)",
"_____no_output_____"
]
],
[
[
"### Create an endpoint\n\nIn the following cell we create a step to deploy the trained model to an endpoint in AWS SageMaker. See [EndpointStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) in the AWS Step Functions Data Science SDK documentation.",
"_____no_output_____"
]
],
[
[
"endpoint_step = steps.EndpointStep(\n \"Create Endpoint\",\n endpoint_name=execution_input['EndpointName'],\n endpoint_config_name=execution_input['ModelName']\n)",
"_____no_output_____"
]
],
[
[
"### Chain together steps for your workflow\n\nCreate your workflow definition by chaining the steps together. See [Chain](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.states.Chain) in the AWS Step Functions Data Science SDK documentation.",
"_____no_output_____"
]
],
[
[
"workflow_definition = steps.Chain([\n training_step,\n model_step,\n transform_step,\n endpoint_config_step,\n endpoint_step\n])",
"_____no_output_____"
]
],
[
[
"Create your workflow using the workflow definition above, and render the graph with [render_graph](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.render_graph).",
"_____no_output_____"
]
],
[
[
"from time import strftime, gmtime\ntimestamp = strftime('%d-%H-%M-%S', gmtime())\n\nworkflow = Workflow(\n name='{}-{}'.format('MyTrainTransformDeploy_v1', timestamp),\n definition=workflow_definition,\n role=workflow_execution_role,\n execution_input=execution_input\n)",
"_____no_output_____"
],
[
"workflow.render_graph()",
"_____no_output_____"
]
],
[
[
"Create the workflow in AWS Step Functions with [create](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.create).",
"_____no_output_____"
]
],
[
[
"workflow.create()",
"_____no_output_____"
]
],
[
[
"Run the workflow with [execute](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.execute).",
"_____no_output_____"
]
],
[
[
"execution = workflow.execute(\n inputs={\n 'JobName': 'regression-{}'.format(uuid.uuid1().hex), # Each Sagemaker Job requires a unique name\n 'ModelName': 'regression-{}'.format(uuid.uuid1().hex), # Each Model requires a unique name,\n 'EndpointName': 'regression-{}'.format(uuid.uuid1().hex) # Each Endpoint requires a unique name,\n }\n)",
"_____no_output_____"
]
],
[
[
"Render workflow progress with the [render_progress](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.render_progress).\n\nThis generates a snapshot of the current state of your workflow as it executes. This is a static image. Run the cell again to check progress. ",
"_____no_output_____"
]
],
[
[
"execution.render_progress()",
"_____no_output_____"
]
],
[
[
"Use [list_events](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.list_events) to list all events in the workflow execution.",
"_____no_output_____"
]
],
[
[
"execution.list_events(html=True)",
"_____no_output_____"
]
],
[
[
"Use [list_executions](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_executions) to list all executions for a specific workflow.",
"_____no_output_____"
]
],
[
[
"workflow.list_executions(html=True)",
"_____no_output_____"
]
],
[
[
"Use [list_workflows](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_workflows) to list all workflows in your AWS account.",
"_____no_output_____"
]
],
[
[
"Workflow.list_workflows(html=True)",
"_____no_output_____"
],
[
"template = workflow.get_cloudformation_template()\n\nwith open('workflow.json', 'w') as f:\n f.write(template)",
"_____no_output_____"
],
[
"!cat template.json",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79a7b5a56d7d566d9ae182ed189439bce77085b | 48,050 | ipynb | Jupyter Notebook | docs/runtimes/mlrun_jobs.ipynb | jasonnIguazio/ghpages-mlrun | b3d719a6afa41a50401dc2f8f90390204278a6c7 | [
"Apache-2.0"
] | 1 | 2021-02-17T08:12:33.000Z | 2021-02-17T08:12:33.000Z | docs/runtimes/mlrun_jobs.ipynb | jasonnIguazio/ghpages-mlrun | b3d719a6afa41a50401dc2f8f90390204278a6c7 | [
"Apache-2.0"
] | null | null | null | docs/runtimes/mlrun_jobs.ipynb | jasonnIguazio/ghpages-mlrun | b3d719a6afa41a50401dc2f8f90390204278a6c7 | [
"Apache-2.0"
] | 1 | 2021-08-30T21:43:38.000Z | 2021-08-30T21:43:38.000Z | 36.651411 | 406 | 0.524079 | [
[
[
"# Kubernetes Jobs & Images\n\nThis topic describes running a kubernetes-based job using shared data, and building custom container images",
"_____no_output_____"
],
[
"## Define a New Function and its Dependencies\n\nDefine a single serverless function with two `handlers`, one for training and one for validation.",
"_____no_output_____"
]
],
[
[
"import mlrun ",
"> 2021-01-24 00:04:38,841 [warning] Failed resolving version info. Ignoring and using defaults\n> 2021-01-24 00:04:40,691 [warning] Unable to parse server or client version. Assuming compatible: {'server_version': 'unstable', 'client_version': 'unstable'}\n"
]
],
[
[
"Use the `%nuclio` magic commands to set package dependencies and configuration:",
"_____no_output_____"
]
],
[
[
"%nuclio cmd -c pip install pandas",
"_____no_output_____"
],
[
"import time\nimport pandas as pd\nfrom mlrun.artifacts import get_model, update_model\n\ndef training(\n context,\n p1: int = 1,\n p2: int = 2\n) -> None:\n \"\"\"Train a model.\n\n :param context: The runtime context object.\n :param p1: A model parameter.\n :param p2: Another model parameter.\n \"\"\"\n # access input metadata, values, and inputs\n print(f'Run: {context.name} (uid={context.uid})')\n print(f'Params: p1={p1}, p2={p2}')\n context.logger.info('started training')\n \n # <insert training code here>\n \n # log the run results (scalar values)\n context.log_result('accuracy', p1 * 2)\n context.log_result('loss', p1 * 3)\n \n # add a lable/tag to this run \n context.set_label('category', 'tests')\n \n # log a simple artifact + label the artifact \n # If you want to upload a local file to the artifact repo add src_path=<local-path>\n context.log_artifact('somefile', \n body=b'abc is 123', \n local_path='myfile.txt')\n \n # create a dataframe artifact \n df = pd.DataFrame([{'A':10, 'B':100}, {'A':11,'B':110}, {'A':12,'B':120}])\n context.log_dataset('mydf', df=df)\n \n # Log an ML Model artifact, add metrics, params, and labels to it\n # and place it in a subdir ('models') under artifacts path \n context.log_model('mymodel', body=b'abc is 123', \n model_file='model.txt', \n metrics={'accuracy':0.85}, parameters={'xx':'abc'},\n labels={'framework': 'xgboost'},\n artifact_path=context.artifact_subpath('models'))\n",
"_____no_output_____"
],
[
"def validation(\n context,\n model: mlrun.DataItem\n) -> None:\n \"\"\"Model validation.\n \n Dummy validation function.\n \n :param context: The runtime context object.\n :param model: The extimated model object.\n \"\"\"\n # access input metadata, values, files, and secrets (passwords)\n print(f'Run: {context.name} (uid={context.uid})')\n context.logger.info('started validation')\n \n # get the model file, class (metadata), and extra_data (dict of key: DataItem)\n model_file, model_obj, _ = get_model(model)\n\n # update model object elements and data\n update_model(model_obj, parameters={'one_more': 5})\n\n print(f'path to local copy of model file - {model_file}')\n print('parameters:', model_obj.parameters)\n print('metrics:', model_obj.metrics)\n context.log_artifact('validation', \n body=b'<b> validated </b>', \n format='html')",
"_____no_output_____"
]
],
[
[
"The following end-code annotation tells ```nuclio``` to stop parsing the notebook from this cell. _**Do not remove this cell**_:",
"_____no_output_____"
]
],
[
[
"# mlrun: end-code",
"_____no_output_____"
]
],
[
[
"______________________________________________",
"_____no_output_____"
],
[
"## Convert the Code to a Serverless Job\n\nCreate a ```function``` that defines the runtime environment (type, code, image, ..) and ```run()``` a job or experiment using that function.\nIn each run you can specify the function, inputs, parameters/hyper-parameters, etc.\n\nUse the ```job``` runtime for running container jobs, or alternatively use another distributed runner like MpiJob, Spark, Dask, and Nuclio.\n\n**Setting up the environment**",
"_____no_output_____"
]
],
[
[
"project_name, artifact_path = mlrun.set_environment(project='jobs-demo', artifact_path='./data/{{run.uid}}')",
"_____no_output_____"
]
],
[
[
"<a id=\"build\"></a>\n### **Define the cluster jobs and build images**\n\nTo use the function in a cluster you need to package the code and its dependencies.\n\nThe ```code_to_function``` call automatically generates a ```function``` object from the current notebook (or specified file) with its list of dependencies and runtime configuration.",
"_____no_output_____"
]
],
[
[
"# create an ML function from the notebook, attache it to iguazio data fabric (v3io)\ntrainer = mlrun.code_to_function(name='my-trainer', kind='job', image='mlrun/mlrun')",
"_____no_output_____"
]
],
[
[
"The functions need a shared storage media (file or object) to pass and store artifacts.\n\nYou can add _**Kubernetes**_ resources like volumes, environment variables, secrets, cpu/mem/gpu, etc. to a function.\n\n```mlrun``` uses _**KubeFlow**_ modifiers (apply) to configure resources. You can build your own resources or use predefined resources e.g. [AWS resources](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/aws.py).\n",
"_____no_output_____"
],
[
"#### _**Option 1: Using file volumes for artifacts**_\nIf you're using the [MLOps platform](https://www.iguazio.com/), use the `mount_v3io()` auto-mount modifier.<br>\nIf you're using another k8s PVC volume, use the `mlrun.platforms.mount_pvc(..)` modifier with the required parameters.\n\nThis example uses the `auto_mount()` modifier. It auto-selects between the k8s PVC volume and the Iguazio data fabric. You can set the PVC volume configuration with the env var below or with the auto_mount params:\n```\n MLRUN_PVC_MOUNT=<pvc-name>:<mount-path>\n```\n\nIf you apply `mount_v3io()` or `auto_mount()` when running the function in the MLOps platform, it attaches the function to Iguazio's real-time data fabric (mounted by default to _**home**_ of the current user).\n\n**Note**: If the notebook is not on the managed platform (it's running remotely) you may need to use secrets.",
"_____no_output_____"
],
[
"For the current ```training``` function, run:",
"_____no_output_____"
]
],
[
[
"# for PVC volumes set the env var for PVC: MLRUN_PVC_MOUNT=<pvc-name>:<mount-path>, pass the relevant parameters\nfrom mlrun.platforms import auto_mount\ntrainer.apply(auto_mount())",
"_____no_output_____"
]
],
[
[
"#### _**Option 2: Using AWS S3 for artifacts**_",
"_____no_output_____"
],
[
"When using AWS, you can use S3. You need a `secret` with AWS credentials. Create the AWS secret with the following command:",
"_____no_output_____"
],
[
"`kubectl create -n <namespace> secret generic my-aws --from-literal=AWS_ACCESS_KEY_ID=<access key> --from-literal=AWS_SECRET_ACCESS_KEY=<secret key>`",
"_____no_output_____"
],
[
"To use the secret:",
"_____no_output_____"
]
],
[
[
"# from kfp.aws import use_aws_secret",
"_____no_output_____"
],
[
"# trainer.apply(use_aws_secret(secret_name='my-aws'))\n# out = 's3://<your-bucket-name>/jobs/{{run.uid}}'",
"_____no_output_____"
]
],
[
[
"______________________________________________",
"_____no_output_____"
],
[
"## Deploy (build) the Function Container\n\nThe `deploy()` command builds a custom container image (creates a cluster build job) from the outlined function dependencies.\n\nIf a pre-built container image already exists, pass the `image` name instead. _**Note that the code and params can be updated per run without building a new image**_.\n\nThe image is stored in a container repository. By default it uses the repository configured on the MLRun API service. You can specify your own docker registry by first creating a secret, and adding that secret name to the build configuration:",
"_____no_output_____"
],
[
"`kubectl create -n <namespace> secret docker-registry my-docker --docker-server=https://index.docker.io/v1/ --docker-username=<your-user> --docker-password=<your-password> --docker-email=<your-email>`",
"_____no_output_____"
],
[
"And then run this: \n\n`trainer.build_config(image='target/image:tag', secret='my_docker')`",
"_____no_output_____"
]
],
[
[
"trainer.deploy(with_mlrun=False)",
"> 2021-01-24 00:05:18,384 [info] starting remote build, image: .mlrun/func-jobs-demo-my-trainer-latest\n\u001b[36mINFO\u001b[0m[0020] Retrieving image manifest mlrun/mlrun:unstable \n\u001b[36mINFO\u001b[0m[0020] Retrieving image manifest mlrun/mlrun:unstable \n\u001b[36mINFO\u001b[0m[0021] Built cross stage deps: map[] \n\u001b[36mINFO\u001b[0m[0021] Retrieving image manifest mlrun/mlrun:unstable \n\u001b[36mINFO\u001b[0m[0021] Retrieving image manifest mlrun/mlrun:unstable \n\u001b[36mINFO\u001b[0m[0021] Executing 0 build triggers \n\u001b[36mINFO\u001b[0m[0021] Unpacking rootfs as cmd RUN pip install pandas requires it. \n\u001b[36mINFO\u001b[0m[0037] RUN pip install pandas \n\u001b[36mINFO\u001b[0m[0037] Taking snapshot of full filesystem... \n\u001b[36mINFO\u001b[0m[0050] cmd: /bin/sh \n\u001b[36mINFO\u001b[0m[0050] args: [-c pip install pandas] \n\u001b[36mINFO\u001b[0m[0050] Running: [/bin/sh -c pip install pandas] \nRequirement already satisfied: pandas in /usr/local/lib/python3.7/site-packages (1.2.0)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/site-packages (from pandas) (2020.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/site-packages (from pandas) (2.8.1)\nRequirement already satisfied: numpy>=1.16.5 in /usr/local/lib/python3.7/site-packages (from pandas) (1.19.5)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas) (1.15.0)\nWARNING: You are using pip version 20.2.4; however, version 21.0 is available.\nYou should consider upgrading via the '/usr/local/bin/python -m pip install --upgrade pip' command.\n\u001b[36mINFO\u001b[0m[0051] Taking snapshot of full filesystem... \n"
]
],
[
[
"## Run the Function on the Cluster\n\nUse ```with_code``` to inject the latest code into the function (without requiring a new build). ",
"_____no_output_____"
]
],
[
[
"trainer.with_code()",
"_____no_output_____"
],
[
"# run our training task with params\ntrain_run = trainer.run(name='my-training', handler='training', params={'p1': 9})",
"> 2021-01-24 00:09:14,760 [info] starting run my-training uid=30b8131285a74f87b16d957fabc5fac3 DB=http://mlrun-api:8080\n> 2021-01-24 00:09:14,928 [info] Job is running in the background, pod: my-training-lhtxt\n> 2021-01-24 00:09:18,972 [warning] Unable to parse server or client version. Assuming compatible: {'server_version': 'unstable', 'client_version': 'unstable'}\nRun: my-training (uid=30b8131285a74f87b16d957fabc5fac3)\nParams: p1=9, p2=2\n> 2021-01-24 00:09:19,050 [info] started training\n> 2021-01-24 00:09:19,299 [info] run executed, status=completed\nfinal state: completed\n"
],
[
"# running validation, use the model result from the previous step \nmodel = train_run.outputs['mymodel']\nvalidation_run = trainer.run(name='validation', handler='validation', inputs={'model': model}, watch=True)",
"> 2021-01-24 00:09:21,259 [info] starting run validation uid=c757ffcdc36d4412b4bcba1df75f079d DB=http://mlrun-api:8080\n> 2021-01-24 00:09:21,536 [info] Job is running in the background, pod: validation-dwd78\n> 2021-01-24 00:09:25,570 [warning] Unable to parse server or client version. Assuming compatible: {'server_version': 'unstable', 'client_version': 'unstable'}\nRun: validation (uid=c757ffcdc36d4412b4bcba1df75f079d)\n> 2021-01-24 00:09:25,719 [info] started validation\npath to local copy of model file - /User/data/30b8131285a74f87b16d957fabc5fac3/models/model.txt\nparameters: {'xx': 'abc', 'one_more': 5}\nmetrics: {'accuracy': 0.85}\n> 2021-01-24 00:09:25,873 [info] run executed, status=completed\nfinal state: completed\n"
]
],
[
[
"## Create and Run a Kubeflow Pipeline\n\nKubeflow pipelines are used for workflow automation, creating a graph of functions and their specified parameters, inputs, and outputs.\n\nYou can chain the outputs and inputs of the pipeline steps, as illustrated below.",
"_____no_output_____"
]
],
[
[
"import kfp\nfrom kfp import dsl\nfrom mlrun import run_pipeline",
"_____no_output_____"
],
[
"@dsl.pipeline(\n name = 'job test',\n description = 'demonstrating mlrun usage'\n)\ndef job_pipeline(\n p1: int = 9\n) -> None:\n \"\"\"Define our pipeline.\n \n :param p1: A model parameter.\n \"\"\"\n\n train = trainer.as_step(handler='training',\n params={'p1': p1},\n outputs=['mymodel'])\n \n validate = trainer.as_step(handler='validation',\n inputs={'model': train.outputs['mymodel']},\n outputs=['validation'])\n ",
"_____no_output_____"
]
],
[
[
"### Running the pipeline",
"_____no_output_____"
],
[
"Pipeline results are stored at the `artifact_path` location:",
"_____no_output_____"
],
[
"You can generate a unique folder per workflow by adding ```/{{workflow.uid}}``` to the path ```mlrun```.",
"_____no_output_____"
]
],
[
[
"artifact_path = 'v3io:///users/admin/kfp/{{workflow.uid}}/'",
"_____no_output_____"
],
[
"arguments = {'p1': 8}\nrun_id = run_pipeline(job_pipeline, arguments, experiment='my-job', artifact_path=artifact_path)",
"> 2021-01-24 00:09:46,670 [info] using in-cluster config.\n"
],
[
"from mlrun import wait_for_pipeline_completion, get_run_db\nwait_for_pipeline_completion(run_id)\ndb = get_run_db().list_runs(project=project_name, labels=f'workflow={run_id}').show()",
"_____no_output_____"
]
],
[
[
"### Viewing the Pipeline on the Dashboard (UI)\n\nIn the **Projects > Jobs and Workflows > Monitor Workflows** tab, press the workflow name to view a graph of the workflow. Press any step to open a pane with full details of the step: either the job's overview, inputs, artifacts, etc.; or the deploy / build function's overview, code, and log. The color of the step, after pressing, indicates the status. See the status description in the Log tab.\nThe graph is refreshed while the pipeline is running.",
"_____no_output_____"
],
[
"<img src=\"../_static/images/pipeline-monitor-workflow.png\" alt=\"pipeline\" width=\"700\"/>",
"_____no_output_____"
],
[
"[**Back to top**](#top)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e79a7ee26ba77cde2544d8f4fe62bcda8a48e2fc | 28,022 | ipynb | Jupyter Notebook | probability_plot_testing.ipynb | Songyosk/PM | 6386ef76c3fcdae394a23d9a54c1a2c86e926967 | [
"MIT"
] | null | null | null | probability_plot_testing.ipynb | Songyosk/PM | 6386ef76c3fcdae394a23d9a54c1a2c86e926967 | [
"MIT"
] | null | null | null | probability_plot_testing.ipynb | Songyosk/PM | 6386ef76c3fcdae394a23d9a54c1a2c86e926967 | [
"MIT"
] | null | null | null | 150.655914 | 22,476 | 0.865213 | [
[
[
"#Son-Gyo Jung; Tutor Group F; Code Script 2\nimport numpy.random as nr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pylab as pl\n\n\n#list and matrix to store data\ndisks = 50\ntest = 100\nprobability_data = np.zeros((disks,2))\n\nfor i in range(disks):\n probability_data[i][0] = i\n\n \ndef joincluster(c1, c2, raw_clusters):\n \"\"\"\n Joining raw_clusters into bigger clusters \n \"\"\"\n for c in raw_clusters[c2]:\n if c in raw_clusters[c1]:\n raw_clusters[c1] += raw_clusters[c2]\n raw_clusters[c2] = []\n \n \ndef findclusters(n, radius, d):\n #Random coordinates generator\n random = nr.uniform(size=(n,d))\n\n #List of disks numbers that form a cluster\n raw_clusters = []\n\n for p1 in range(n):\n for p2 in range((p1+1),n): \n Cx = random[p1][0] - random[p2][0]\n Cy = random[p1][1] - random[p2][1]\n\n if d is 3:\n Cz = random[p1][2] - random[p2][2]\n distance = np.sqrt(Cx*Cx + Cy*Cy + Cz*Cz)\n\n distance = np.sqrt(Cx*Cx + Cy*Cy)\n\n if distance <= radius*2.0 and distance !=0.0:\n if ([p1,p2]) and ([p2,p1]) not in raw_clusters:\n raw_clusters.append([p1,p2])\n\n #List of disks touching x=0\n left = []\n\n for p1 in range(n):\n left_x = random[p1][0]\n\n if left_x <= radius:\n left.append(p1)\n\n\n #List of disks touching x=1\n right = [] \n for p1 in range(n):\n right_x = 1.0 - random[p1][0]\n if right_x <= radius:\n right.append(p1)\n \n \n for i in range(len(raw_clusters)):\n for j in range(len(raw_clusters)):\n if i!=j:\n joincluster(i, j, raw_clusters)\n\n\n merged_clusters=[] \n\n raw_clusters=[x for x in raw_clusters if x != []]\n for i in range(len(raw_clusters)):\n merged_clusters.append(list(set(raw_clusters[i]))) \n\n\n #Coordinates of merged_clusters into the library\n clist = []\n\n for i in range(len(merged_clusters)):\n clist.append({'DiskID':merged_clusters[i], 'touch_left':'False', 'touch_right':'False'})\n\n\n #Checking if it touches x=0\n for i in range(len(clist)):\n 
for j in range(len(clist[i]['DiskID'])):\n if clist[i]['DiskID'][j] in left:\n clist[i]['touch_left']='True'\n\n\n #Checking if it touches x=1\n for i in range(len(clist)):\n for j in range(len(clist[i]['DiskID'])):\n if clist[i]['DiskID'][j] in right:\n clist[i]['touch_right']='True'\n \n \n #Collecting data for the probability graph \n for k in range(len(clist)):\n if clist[k]['touch_left']=='True' and clist[k]['touch_right']=='True':\n probability_data[number_of_disks][1] = 1.0 + probability_data[number_of_disks][1]\n\n\nfor number_of_test in range(test): \n for number_of_disks in range(disks):\n findclusters(number_of_disks, 0.10, 2)\n \n\nx = probability_data[:, 0]\ny = probability_data[:, 1]/test\n\n\ndef smooth(y, box_pts):\n box = np.ones(box_pts)/box_pts\n y_smooth = np.convolve(y, box, mode='same')\n \n return y_smooth\n\n\npl.title('Probability Graph')\nplt.ylabel('Probability of forming a continuous path')\nplt.xlabel('Number Density of the Disks')\nplt.plot(x, y,'x')\nplt.plot(x, smooth(y,1), 'g-', lw=2)\nplt.show()\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e79a80c739faf1d7f9a35c4243f57419d82b9f39 | 140,618 | ipynb | Jupyter Notebook | scratch work/Yuqing-Data-Merge/Scenario2-v9.ipynb | thinkhow/Market-Prediction-with-Macroeconomics-features | feac711017739ea6ffe46a7fcac6b4b0c265e0b5 | [
"MIT"
] | null | null | null | scratch work/Yuqing-Data-Merge/Scenario2-v9.ipynb | thinkhow/Market-Prediction-with-Macroeconomics-features | feac711017739ea6ffe46a7fcac6b4b0c265e0b5 | [
"MIT"
] | 1 | 2021-05-24T00:26:34.000Z | 2021-05-24T00:26:34.000Z | scratch work/Yuqing-Data-Merge/Scenario2-v9.ipynb | thinkhow/Market-Prediction-with-Macroeconomics-features | feac711017739ea6ffe46a7fcac6b4b0c265e0b5 | [
"MIT"
] | null | null | null | 102.416606 | 99,576 | 0.808787 | [
[
[
"# Gradient descent algorithm for Scenario 2\n\n\nIn this part, we implement an gradient descent algorithm to optimization the objective loss function in Scenario 2:\n\n\n$$\\min F := \\min \\frac{1}{2(n-i)} \\sum_{i=1000}^n (fbpredic(i) + a*tby(i) +b*ffr(i) + c*fta(i) - asp(i))^2$$\n\nGradient descent: \n\n$$ \\beta_k = \\beta_{k-1} + \\delta* \\nabla F, $$\nwhere $\\delta$ control how far does each iteration go.\n\n\n### Detailed plan\n\nFirst, split the data as train and test with 80% and 20% respectively. For the training part, we need prophet() predicted price, there are a couple of issues. One is prophet() can not predict too far in the future. The other is we can not call prophet() too many times, this takes a lot of time. So we will use a sliding window strategy:\n\n1, Split the train data as train_1 and train_2, where train_1 is used as a sliding window to fit prophet(), and give predictions in train_2. Train_2 is used train the model we proposed above.\n\n2, After we got full size (size of train_2) predictions from prophet(), then we use gradient descent to fit the above model, extracting the coefficients of features to make predicution in the testing data.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import FunctionTransformer\nfrom numpy import meshgrid\n\n## For plotting\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nimport datetime as dt\nimport seaborn as sns\nsns.set_style(\"whitegrid\")",
"_____no_output_____"
],
[
"df= pd.read_csv('df7.csv', parse_dates=['Date'])\ndf = df.rename(columns = {\"Date\":\"ds\",\"Close\":\"y\"}) \ndf \n# len(df)",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"from datetime import datetime\np = 0.9\n# Train around 90% of dataset \ncutoff = int((p*len(df)//100)*100)\ndf_train = df[:cutoff].copy()\ndf_test = df.drop(df_train.index).copy()\n\nprint(df_train, df_test)",
" ds y tby_sqsq une_div_eps_vix_fta\n0 2005-06-20 1216.10 285.343042 8.989853e+10\n1 2005-06-21 1213.61 271.709069 9.032286e+10\n2 2005-06-22 1213.88 243.438006 8.984219e+10\n3 2005-06-23 1200.73 245.912579 9.111354e+10\n4 2005-06-24 1191.57 236.126249 9.202165e+10\n... ... ... ... ...\n3495 2019-06-20 2954.18 16.322408 6.065473e+10\n3496 2019-06-21 2950.46 18.360368 6.235005e+10\n3497 2019-06-24 2945.35 16.649664 6.137054e+10\n3498 2019-06-25 2917.38 16.000000 6.287749e+10\n3499 2019-06-26 2913.78 17.661006 6.207108e+10\n\n[3500 rows x 4 columns] ds y tby_sqsq une_div_eps_vix_fta\n3500 2019-06-27 2924.92 16.322408 6.038335e+10\n3501 2019-06-28 2941.76 16.000000 5.888314e+10\n3502 2019-07-01 2964.33 16.981817 5.581583e+10\n3503 2019-07-02 2973.01 15.369536 5.327365e+10\n3504 2019-07-03 2995.82 14.757891 5.215275e+10\n... ... ... ... ...\n3893 2021-01-25 3855.36 1.215506 1.826285e+11\n3894 2021-01-26 3849.62 1.215506 1.787428e+11\n3895 2021-01-27 3750.77 1.169859 2.331807e+11\n3896 2021-01-28 3787.38 1.310796 2.151189e+11\n3897 2021-01-29 3714.24 1.518070 2.317696e+11\n\n[398 rows x 4 columns]\n"
]
],
[
[
" Use prophet() to make predictions, we will split training as train_1 and train_2 with ratio 40% vs 60%, \n train_1 will be used to fit prophet(), then predict on train_2. Getting the predictions, feed the data into\n the Scenario 2 model, train again to get the parameters a,b,c,....",
"_____no_output_____"
]
],
[
[
"#prophet part \nfrom fbprophet import Prophet\nstart = 1000 # 1000 # the number of initial data for training \npred_size =100 # predicted periods \nnum_winds = int((df_train.shape[0]-start)/pred_size) #(4000-3000)/100 =30\n\npro_pred = []\n\n# use accumulated data to predict the next pred_size data\nfor i in range(num_winds):\n tmp_train = df_train.iloc[: start+ i*pred_size].copy()\n \n fbp = Prophet(daily_seasonality=True)\n # fit close price using fbprophet model\n fbp.fit(tmp_train[['ds','y']])\n \n # predict pred_size futures and get the forecast price \n fut = fbp.make_future_dataframe(periods = pred_size,)\n tmp_forecast = fbp.predict(fut) \n \n # only require the forcast on test data of temporary training data\n pred = tmp_forecast[start+ i*pred_size:].yhat\n pro_pred.append(pred)\n ",
"_____no_output_____"
],
[
"pro_pred",
"_____no_output_____"
],
[
"flat_pro_pred = [item for l1 in pro_pred for item in l1]",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"df= pd.read_csv('df7.csv', parse_dates=['Date'])\ndf = df.rename(columns = {\"Date\":\"ds\",\"Close\":\"y\"}) ",
"_____no_output_____"
],
[
"df['tby_sqsq'] = df['tby']**2\n# df['eps_sqrt'] = np.sqrt(df['eps'])\ndf['une_div_vix'] =df['une'] * df['div'] * df['vix']\n\ndf = df.drop(columns=['tby','ffr', 'div', 'une','vix'])",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
],
[
"possible_features = ['fta', 'eps', 'tby_sqsq', 'une_div_vix']",
"_____no_output_____"
],
[
"df_train = df[:cutoff].copy()\ndf_test = df[cutoff:].copy()\n",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression\nreg = LinearRegression(fit_intercept=False, normalize=True, copy_X = True)\nreg.fit(df_train[start:cutoff][possible_features], df_train[start:cutoff]['y'] - flat_pro_pred)",
"_____no_output_____"
],
[
"coef = []\nfor i in range(len(possible_features)):\n coef.append(np.round(reg.coef_[i],5))\n\nprint(coef)",
"[4e-05, -1.75907, 13.27792, -0.14746]\n"
],
[
"# Forecast the Test Data\nfrom fbprophet import Prophet\ntest_time = int((1-p)* len(df))\nfbp = Prophet(daily_seasonality=True)\nfbp.fit(df_train[['ds','y']])\nfut = fbp.make_future_dataframe(periods = test_time,)\nforecast = fbp.predict(fut)\n\npred_test = forecast[cutoff:cutoff+test_time].yhat\npred_test = pred_test.ravel()",
"_____no_output_____"
],
[
"len(pred_test)",
"_____no_output_____"
],
[
"pp_test = pred_test.copy() # predicted price on testing data \npp_train = flat_pro_pred.copy() # predicted price on training data \nfor i in range(len(possible_features)):\n pp_test += coef[i] * df_test[df_test.columns[i+2]][:test_time].ravel()\n pp_train += coef[i] * df_train[df_train.columns[i+2]][start:].ravel()\n",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_squared_error as MSE\n# MSE for test data\n# Actual close price: df_test[:test_time].y \n# Predicted price by prophet: pred_test\n# Predicted price by tuning \nmse1 = MSE(df_test[:test_time].y,pred_test) #\nmse2 = MSE(df_test[:test_time].y, pp_test)\nprint(mse1,mse2)",
"67923.0178358458 50404.66200315674\n"
],
[
"# MSE for train data\nmse3 = MSE(df_train[start:].y, flat_pro_pred)\nmse4 = MSE(df_train[start:].y, pp_train)\nprint(mse3,mse4)",
"19362.380200429267 16346.697227941522\n"
],
[
"train_pred_yhat = [np.nan for i in range(start)] + flat_pro_pred\ntrain_pp_train = [np.nan for i in range(start)] + pp_train.tolist()\n\n",
"_____no_output_____"
],
[
"train_date = df_train[['ds']].to_numpy().ravel()\ntrain_date",
"_____no_output_____"
],
[
"fc_train = pd.DataFrame(data={'ds':train_date,'fbsp':train_pred_yhat, 'imsp': train_pp_train})\nfc_train",
"_____no_output_____"
],
[
"m = len(forecast) -cutoff\ntest_pred_yhat = forecast.loc[cutoff:].yhat.copy().to_numpy().ravel()\ntest_date = df_test[['ds']][:m].to_numpy().ravel()",
"_____no_output_____"
],
[
"fc_test = pd.DataFrame(data={'ds':test_date, 'fbsp':test_pred_yhat, 'imsp': pp_test.tolist() })\nfc_test",
"_____no_output_____"
],
[
"plt.figure(figsize=(18,10))\n\n# plot the training data\nplt.plot(df_train.ds,df_train.y,'b',\n label = \"Training Data\")\n\nplt.plot(df_train.ds, fc_train.imsp,'g-',\n label = \"Improved Fitted Values\")\n\n# plot the fit\nplt.plot(df_train.ds, fc_train.fbsp,'r-',\n label = \"FB Fitted Values\")\n\n# # plot the forecast\nplt.plot(df_test[:m].ds, fc_test.fbsp,'r--',\n label = \"FB Forecast\")\nplt.plot(df_test[:m].ds, fc_test.imsp,'g--',\n label = \"Improved Forecast\")\nplt.plot(df_test[:m].ds,df_test[:m].y,'b--',\n label = \"Test Data\")\n\nplt.legend(fontsize=14)\n\nplt.xlabel(\"Date\", fontsize=16)\nplt.ylabel(\"SP&500 Close Price\", fontsize=16)\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79a987d1fabda79f6e6cb18745b3b9bd1b172a8 | 6,295 | ipynb | Jupyter Notebook | get_total_counts.ipynb | abigailStev/MAXIJ1535_QPO | cefa4342d4d5140c9508b3099d7184f8d447eae7 | [
"BSD-3-Clause"
] | 1 | 2020-05-02T15:05:47.000Z | 2020-05-02T15:05:47.000Z | get_total_counts.ipynb | astrojuan/MAXIJ1535_QPO | cefa4342d4d5140c9508b3099d7184f8d447eae7 | [
"BSD-3-Clause"
] | null | null | null | get_total_counts.ipynb | astrojuan/MAXIJ1535_QPO | cefa4342d4d5140c9508b3099d7184f8d447eae7 | [
"BSD-3-Clause"
] | 1 | 2022-03-07T15:07:12.000Z | 2022-03-07T15:07:12.000Z | 25.18 | 87 | 0.524067 | [
[
[
"from astropy.io import fits\nimport numpy as np\nimport os",
"_____no_output_____"
],
[
"homedir = os.path.expanduser('~')\nfile_list = homedir + \"/Dropbox/Research/MAXIJ1535_B-QPO/early_evtlists.txt\"\ndata_dir = homedir + \"/Reduced_data/MAXIJ1535_event_cl\"\nassert os.path.isfile(file_list)\n\ndata_files = [line.strip() for line in open(file_list)]\nif not data_files: ## If data_files is an empty list\n raise Exception(\"ERROR: No files in the list of event lists: \"\\\n \"%s\" % file_list)",
"_____no_output_____"
],
[
"counts = 0\nexposure = 0\nfor evtfile in data_files:\n print(evtfile)\n if evtfile[0] == 'n':\n evtfile = data_dir + \"/\" + evtfile\n hdulist = fits.open(evtfile, memmap=True)\n evtheader = hdulist[1].header\n n_events = evtheader['NAXIS2']\n len_exposure = evtheader['EXPOSURE']\n hdulist.close()\n counts += n_events\n exposure += len_exposure\n print(\"Events: %d\" % n_events)\n print(\"Exposure: %.3f\" % len_exposure)\n print(\"\")\n# break",
"ni1050360101_0mpu7_cl.evt.gz\nEvents: 45\nExposure: 24.000\n\nni1050360102_0mpu7_cl.evt.gz\nEvents: 351\nExposure: 62.000\n\nni1050360103_0mpu7_cl.evt.gz\nEvents: 418851\nExposure: 133.000\n\nni1050360104_0mpu7_cl.evt.gz\nEvents: 37120845\nExposure: 4757.000\n\nni1050360105_0mpu7_cl.evt.gz\nEvents: 75799101\nExposure: 9663.000\n\nni1050360106_0mpu7_cl.evt.gz\nEvents: 41523646\nExposure: 5608.000\n\nni1050360107_0mpu7_cl.evt.gz\nEvents: 13294044\nExposure: 1546.000\n\nni1050360108_0mpu7_cl.evt.gz\nEvents: 31783280\nExposure: 3339.164\n\nni1050360109_0mpu7_cl.evt.gz\nEvents: 43569122\nExposure: 4022.000\n\nni1050360110_0mpu7_cl.evt.gz\nEvents: 38761137\nExposure: 3061.000\n\nni1050360111_0mpu7_cl.evt.gz\nEvents: 33034670\nExposure: 1935.000\n\nni1050360112_0mpu7_cl.evt.gz\nEvents: 46814279\nExposure: 2668.000\n\nni1050360113_0mpu7_cl.evt.gz\nEvents: 97639356\nExposure: 5600.000\n\nni1050360114_0mpu7_cl.evt.gz\nEvents: 36246051\nExposure: 2081.000\n\nni1050360115_0mpu7_cl.evt.gz\nEvents: 161107745\nExposure: 9331.000\n\nni1050360116_0mpu7_cl.evt.gz\nEvents: 107057697\nExposure: 6324.000\n\nni1050360120_0mpu7_cl.evt.gz\nEvents: 59797725\nExposure: 3572.000\n\nni1050360117_0mpu7_cl.evt.gz\nEvents: 78421303\nExposure: 4737.000\n\nni1050360118_0mpu7_cl.evt.gz\nEvents: 58475906\nExposure: 3570.000\n\nni1050360119_0mpu7_cl.evt.gz\nEvents: 55745102\nExposure: 3567.000\n\nni1130360101_0mpu7_cl.evt.gz\nEvents: 32876401\nExposure: 2154.000\n\nni1130360102_0mpu7_cl.evt.gz\nEvents: 20696331\nExposure: 1375.000\n\nni1130360103_0mpu7_cl.evt.gz\nEvents: 42988725\nExposure: 2994.000\n\nni1130360104_0mpu7_cl.evt.gz\nEvents: 18643581\nExposure: 1499.000\n\nni1130360105_0mpu7_cl.evt.gz\nEvents: 63160849\nExposure: 5088.000\n\nni1130360106_0mpu7_cl.evt.gz\nEvents: 46427270\nExposure: 3681.000\n\nni1130360107_0mpu7_cl.evt.gz\nEvents: 42468980\nExposure: 3916.000\n\nni1130360108_0mpu7_cl.evt.gz\nEvents: 38412431\nExposure: 3409.000\n\nni1130360109_0mpu7_cl.evt.gz\nEvents: 
33812682\nExposure: 2649.000\n\nni1130360110_0mpu7_cl.evt.gz\nEvents: 76714292\nExposure: 6419.000\n\nni1130360111_0mpu7_cl.evt.gz\nEvents: 69665771\nExposure: 5689.000\n\nni1130360112_0mpu7_cl.evt.gz\nEvents: 35973788\nExposure: 3436.000\n\nni1130360113_0mpu7_cl.evt.gz\nEvents: 30187173\nExposure: 3114.000\n\nni1130360114_0mpu7_cl.evt.gz\nEvents: 11749237\nExposure: 1330.000\n\n"
],
[
"print(counts)\nprint(exposure)",
"1580387767\n122353.16419035196\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e79abbc9b4423fc809373eab4917151b0154a1be | 221,260 | ipynb | Jupyter Notebook | notebooks/16_pandas.ipynb | sniafas/python_ml_tutorial | 9c240cfb92c1c823b8044a942b760f49143634e4 | [
"MIT"
] | 100 | 2019-03-23T13:23:48.000Z | 2022-02-06T03:57:40.000Z | notebooks/16_pandas.ipynb | nikolis7/python_ml_tutorial | d1189dff44a29d1054504071892d34da356f092b | [
"MIT"
] | 1 | 2020-02-23T14:00:59.000Z | 2020-02-26T11:57:38.000Z | notebooks/16_pandas.ipynb | nikolis7/python_ml_tutorial | d1189dff44a29d1054504071892d34da356f092b | [
"MIT"
] | 25 | 2019-04-18T22:48:02.000Z | 2022-02-19T18:46:43.000Z | 27.492545 | 519 | 0.385334 | [
[
[
"# Data Analysis in Python\n\nIn this session we will learn how to properly utilize python's [pandas](https://pandas.pydata.org/) library for data transforming, cleaning, filtering and exploratory data analysis.\n\n## Pandas\n\nPython's Data Analysis Library\n\nPython has long been great for data munging and preparation, but less so for data analysis and modeling. *Pandas* helps fill this gap, enabling you to carry out your entire data analysis workflow in Python.\n\nPandas is built on top of *numpy* aiming at providing higher-level functionality as well as a new data structure that works well with tabular data with heterogenous-typed columns (e.g. Excel spreadsheets, SQL tables).\n\n### Data Structures\n\nPandas introduces two new data structures to Python: the **Series** and the **DataFrame**. Both of which are built on top of NumPy.\n\n### Series\n\nA **series**, in *pandas* is a one-dimensional *ndarray* with axis labels. The axis labels are collectively referred to as the **index**. The labels facilitate in allowing us to refer to the elements in the series either by their position (like in a list or an array) or by their label (like in a dictionary).\n\nThe basic method to create a `pd.Series` is to call:\n```python\ns = pd.Series(data, index=index)\n```\nwhere *data* is most commonly a dictionary (where the keys will be used as the `index` and the values as the elements) or a `numpy.array` and `index` is a *list* of labels.",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\nimport pandas as pd # for simplicity we usually refer to pandas as pd\nimport numpy as np\n\ns = pd.Series([1,3,5,np.nan,6,8], index=['a', 'b', 'c', 'd', 'e', 'f'])\n# By passing a list as the only argument in series, we let pandas create a default integer index\nprint(s)",
"a 1.0\nb 3.0\nc 5.0\nd NaN\ne 6.0\nf 8.0\ndtype: float64\n"
]
],
[
[
"Like arrays, a series can only have one `dtype` (in this case `float64`). \n\nAs we mentioned previously, indexing elements in the *Series* can be done either through their position or through their label. ",
"_____no_output_____"
]
],
[
[
"print(s[4]) # position \nprint(s['e']) # label",
"6.0\n6.0\n"
]
],
[
[
"If we don't set an `index` during the creation of the *Series*, the labels will be set to the position of each element. ",
"_____no_output_____"
]
],
[
[
"s = pd.Series([1,3,5,np.nan,6,8])\nprint(s)",
"0 1.0\n1 3.0\n2 5.0\n3 NaN\n4 6.0\n5 8.0\ndtype: float64\n"
]
],
[
[
"The last is the most common use of a series.\n\nWe can easily keep the underlying `np.array` containing just the values of the *Series*. ",
"_____no_output_____"
]
],
[
[
"s.values # a np.array with the values of the Series",
"_____no_output_____"
]
],
[
[
"A **DataFrame** is a two-dimensional labeled data structure with columns of potentially different types. You can think of it like a spreadsheet. It is organized in such a way that it is essentially a collection of `pd.Series`, where each series is a column. This way each column must have a **single** data type, but the data type can **differ from column to column**.\n\nA *DataFrame* can have labels for both its rows and its columns, however we usually prefer to label **only the columns** and leave the rows to have their position as their labels. \n\nThe easiest way to create a *DataFrame* is to pass in a dictionary of objects.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({'A' : 1, # repeats integer for the length of the dataframe\n 'B' : pd.Timestamp('20190330'), # timestamp datatype, repeats it for the length of the dataframe\n 'C' : pd.Series(range(4), dtype='float32'), # creates a series of ones and uses it as a column\n 'D' : np.array([3] * 4,dtype='int32'), # np.array as a column\n 'E' : pd.Categorical([\"test\",\"train\",\"test\",\"train\"]), # categorical data type\n 'F' : 'foo' }) # string, repeats it for the length of the data frame\n \ndf # renders better in jupyter if we don't use print",
"_____no_output_____"
]
],
[
[
"## DataFrame inspection\n\nIn most cases the *DataFrames* are thousands of rows long, we can't view all the data at once.\n\n- Look at the **first** entries.",
"_____no_output_____"
]
],
[
[
"df.head() # prints first entries (by default 5)",
"_____no_output_____"
]
],
[
[
"- Look at the **last** entries.",
"_____no_output_____"
]
],
[
[
"df.tail(3) # prints last 3 entries",
"_____no_output_____"
]
],
[
[
"- Look at entries at **random**.",
"_____no_output_____"
]
],
[
[
"df.sample(2) # prints two random entries",
"_____no_output_____"
]
],
[
[
"### Information about the *DataFrame*\n\nThe two main attributes of a *DataFrame* are:\n\n- Its `shape`. *DataFrames* are always two-dimensional, so the only information this provides is the **number of rows and samples**.\n- Its `dtypes`, which shows the data type of each of the columns.",
"_____no_output_____"
]
],
[
[
"print('shape:', df.shape) # prints the shape of the dataframe\nprint(df.dtypes) # prints the data type of each column",
"shape: (4, 6)\nA int64\nB datetime64[ns]\nC float32\nD int32\nE category\nF object\ndtype: object\n"
]
],
[
[
"Another important attribute of the *DataFrame* is the labelling on its rows and columns. ",
"_____no_output_____"
]
],
[
[
"print('Row names: ', df.index)\nprint('Column names:', df.columns)",
"Row names: RangeIndex(start=0, stop=4, step=1)\nColumn names: Index(['A', 'B', 'C', 'D', 'E', 'F'], dtype='object')\n"
]
],
[
[
"### Statistical summary of numeric columns\n\nWe can also easily view a statistical description of our data (only the columns with numeric data types).",
"_____no_output_____"
]
],
[
[
"df.describe() # only numerical features appear when doing this",
"_____no_output_____"
]
],
[
[
"## Indexing data\n\nSince *DataFrames* support both indexing through labels and through position we have two main ways of getting an item.\n\n### **Positional** indexing.\n\nThis is done through `.iloc`, which requires two arguments: the position of the desired element's row and the position of its column. `.iloc` essentially allows us to use the *DataFrame* as an array.",
"_____no_output_____"
]
],
[
[
"df.iloc[3, 2] # element in the 4th row of the 3rd column",
"_____no_output_____"
]
],
[
[
"Slicing works the same way it does in *numpy*.",
"_____no_output_____"
]
],
[
[
"df.iloc[::2, -3:] # odd rows, last three columns",
"_____no_output_____"
]
],
[
[
"As does indexing through lists.",
"_____no_output_____"
]
],
[
[
"df.iloc[[0, 3], [1, 3, 4]] # 1st and 4th row; 2nd, 4th and 5th columns",
"_____no_output_____"
]
],
[
[
"### Indexing with labels\n\nWe can use the row and column labels to access an element through `.loc`. Remember, if we haven't assigned any labels to the rows, their labels will be the same as their position.",
"_____no_output_____"
]
],
[
[
"df.loc[3, 'C'] # element in the row with the label 3 and the column with the label 'C'",
"_____no_output_____"
]
],
[
[
"Slicing also works!",
"_____no_output_____"
]
],
[
[
"df.loc[::2, 'B':'D'] # odd rows, columns 'B' through 'D'",
"_____no_output_____"
]
],
[
[
"And even indexing through lists.",
"_____no_output_____"
]
],
[
[
"df.loc[[0, 3], ['B', 'D', 'E']] # 1st and 4th row; columns 'B', 'D', and 'E'",
"_____no_output_____"
]
],
[
[
"Note that `.loc` **included** `'D'` in its slice!\n\n### Without locators\n\n#### Columns \n\nPandas offers an easier way of slicing one or more columns from a *DataFrame*. ",
"_____no_output_____"
]
],
[
[
"df['B'] # get the column 'B'",
"_____no_output_____"
],
[
"df[['B', 'D', 'E']] # get a slice of the columns 'B', 'D' and 'E'",
"_____no_output_____"
]
],
[
[
"Note that if we slice a single column it will return a `pd.Series`, but if we slice more we'll get a `pd.DataFrame`.\n\nIf we wanted to get a `pd.DataFrame` with a single column we could use this syntax:",
"_____no_output_____"
]
],
[
[
"df[['B']] # get a dataframe containing only the column 'B'",
"_____no_output_____"
]
],
[
[
"Pandas also allows us to slice columns with this syntax:\n\n```python\n\ndf.B # gets the column 'B'\n\n# Equivalient to:\ndf['B']\n\n```\n\nHowever, it is **not** recommended!\n\n#### Slicing rows\n\nWe can easily slice rows like this:\n\n```python\ndf[:2] # first two rows\ndf[-3:] # last three rows\ndf[1:2] # second row\n```\n\nHowever, if we try index a single row, it will raise an error (because it will be looking for a column named 2).\n\n```python\ndf[2] # KeyError\n\n# Instead use\ndf.loc[2]\n# or\ndf.iloc[2]\n```\n\n## Filtering\n\nPandas' allows us to easily apply filters on the *DataFrame* with the same syntax we saw in the previous tutorial. Here it is a bit more intuitive, due to the naming scheme!\n\n### Single condition\n\nLike in *numpy*, operations here (even logical) are performed element-wise and, if necessary, with broadcasting.",
"_____no_output_____"
]
],
[
[
"df['E'] == 'test'",
"_____no_output_____"
]
],
[
[
"If we use the result of the logical condition above as an index, pandas will filter the rows based on the `True` or `False` value.",
"_____no_output_____"
]
],
[
[
"df[df['E'] == 'test'] # keeps the rows that have a value equal to 'test' in column 'E'",
"_____no_output_____"
]
],
[
[
"This leads to very a intuitive and syntactically simple application of filters.\n\n### Combining multiple conditions\n\nTo combine the outcome of more than one logical conditions we have to use the following symbols:\n\n```python\n(cond1) & (cond2) # logical AND\n(cond1) | (cond2) # logical OR\n~ (cond1) # logical NOT\n```\n\n**Don't forget the parentheses!**",
"_____no_output_____"
]
],
[
[
"df[(df['C'] > 1) | (df['E'] == 'test')] # keeps the rows that have a value equal to 'test' \n # in column 'E' or a value larger than 1 in column 'C'",
"_____no_output_____"
]
],
[
[
"## Adding / Deleting\n\n\n### Rows\n\nTo add a new row, we can use `.append()`.",
"_____no_output_____"
]
],
[
[
"# Adds a fifth row to the DataFrame:\ndf.append({'A': 3, \n 'B': pd.Timestamp('20190331'),\n 'C': 4.0,\n 'D': -3,\n 'E': 'train',\n 'F': 'bar'},\n ignore_index=True) ",
"_____no_output_____"
]
],
[
[
"Note that the length and the data types should be compatible! Because this syntax isn't very convenient we usually **avoid using it** altogether.\n\nKeep in mind that this operation **isn't performed inplace**. Instead it returns a copy of the *DataFrame*! If we want to make the append permanent, we can always assign it to itself.",
"_____no_output_____"
]
],
[
[
"df = df.append({'A': 3, \n 'B': pd.Timestamp('20190331'),\n 'C': 4.0,\n 'D': -3,\n 'E': 'train',\n 'F': 'bar'},\n ignore_index=True)\n\ndf",
"_____no_output_____"
]
],
[
[
"Another option would be to add the row through `.loc`:\n\n```python\ndf.loc[len(df)] = [3, pd.Timestamp('20190331'), 4.0, -3, 'train', 'bar']\n```\n\nTo delete a row from a *DataFrame* we can use `.drop()`:\n\n```python\nrow_label # label of the row we want to delete\n\n# Doesn't overwrite df, instead returns a copy:\ndf.drop(row_label) \n\n# Overwrites df:\ndf = df.drop(row_label)\ndf.drop(row_label, inplace=True)\n```",
"_____no_output_____"
]
],
[
[
"df = df.drop(2) # drops the third row from the dataframe",
"_____no_output_____"
]
],
[
[
"### Columns\n\nWe can add a new column in the *DataFrame* like we would an element in a dictionary. Just keep in mind that the dimensions must be compatible (e.g. we can't add 3 elements to a *DataFrame* with four rows).",
"_____no_output_____"
]
],
[
[
"df['G'] = [10, 22, -8, 13]\ndf",
"_____no_output_____"
]
],
[
[
"To delete a row we, again, can use `.drop(col_label, axis=1)`. The parameter `axis=1` tells pandas that we are looking to drop a column and that it should look for the key `col_name` in the columns. ",
"_____no_output_____"
]
],
[
[
"df = df.drop('A', axis=1) # drops column with the label 'A'\ndf",
"_____no_output_____"
]
],
[
[
"## Sorting and rearranging\n\n### Transposing\n\nThis works exactly like in *numpy*.",
"_____no_output_____"
]
],
[
[
"df.T # not inplace",
"_____no_output_____"
]
],
[
[
"### Sorting\n\n- By **value**",
"_____no_output_____"
]
],
[
[
"df = df.sort_values(by='G') # sorts DataFrame according to values from column 'B'\ndf",
"_____no_output_____"
]
],
[
[
"**Caution**: that when performing operations that rearrange the rows, the row labels will **no longer match** the row positions!\n\nTo solve this issue, we can reset the labels to match the positions:\n\n```python\ndf.reindex()\n```\n\nThis won't rearrange the *DataFrame* in any way; it will just **change the labelling of the rows**.\n\n- By **index**",
"_____no_output_____"
]
],
[
[
"df = df.sort_index()\ndf",
"_____no_output_____"
]
],
[
[
"This **rearranged** the *DataFrame* so that the row labels are sorted!\n\nBy adding the argument `axis=1` we can perform these operations on the columns instead.",
"_____no_output_____"
]
],
[
[
"df.sort_index(axis=1, ascending=False) # sort columns so that their names are descending ",
"_____no_output_____"
]
],
[
[
"## Statistical information\n\nThese work only for numerical values. A sample of them are presented below, while there are [many more](https://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats) available. ",
"_____no_output_____"
]
],
[
[
"print('Sum:')\nprint(df.sum()) # sum of each column\nprint('\\nMean:')\nprint(df.mean()) # mean of each column\nprint('\\nMin:')\nprint(df.min()) # minimum element of each column\nprint('\\nMax:')\nprint(df.max()) # maximum element of each column\nprint('\\nStandard deviation:')\nprint(df.std()) # standard deviation of each column\nprint('\\nVariance:')\nprint(df.var()) # variance of each column",
"Sum:\nC 8.0\nD 6.0\nG 37.0\ndtype: float64\n\nMean:\nC 2.00\nD 1.50\nG 9.25\ndtype: float64\n\nMin:\nB 2019-03-30 00:00:00\nC 0\nD -3\nE test\nF bar\nG -8\ndtype: object\n\nMax:\nB 2019-03-31 00:00:00\nC 4\nD 3\nE train\nF foo\nG 22\ndtype: object\n\nStandard deviation:\nC 1.825742\nD 3.000000\nG 12.579746\ndtype: float64\n\nVariance:\nC 3.333333\nD 9.000000\nG 158.250000\ndtype: float64\n"
]
],
[
[
"Keep in mind that, contrary to *numpy*, *pandas* by default ignores `np.nan` values when performing operations.\n\n## Histograms\n\nAnother very convenient functionality offered by *pandas* is to find the unique values of a *Series* and count each value's number of occurrences. \n\n```python\nSeries.unique() # returns an array of the unique values in a pd.Series\nSeries.value_counts() # returns the unique values along with their number of occurrences\n```",
"_____no_output_____"
]
],
[
[
"df['E'].unique() ",
"_____no_output_____"
],
[
"df['E'].value_counts()",
"_____no_output_____"
]
],
[
[
"## Applying functions\n\nOne of the most powerful methods offered is `.apply()`. There are actually two different things that can be done by this method, depending on if it's called from a *DataFrame* or a *Series*.\n\n### *DataFrame.apply()*\n\nWhen called from a *DataFrame*, `.apply()` applies a function to each of the *DataFrame's* columns **independently**. The built-in methods we saw previously produce similar results, the application of a function (e.g. `max`, `min`, `sum`) to every *DataFrame* column.\n\nFor example, how many **unique** values does each column have?\n\n",
"_____no_output_____"
]
],
[
[
"df['C'].unique()",
"_____no_output_____"
]
],
[
[
"The `len()` of this array shows *how many* unique values we have. ",
"_____no_output_____"
]
],
[
[
"len(df['C'].unique())",
"_____no_output_____"
]
],
[
[
"Now, can we apply this function to every column in the *DataFrame*?",
"_____no_output_____"
]
],
[
[
"# First, we need to write a function\n\ndef num_unique(series):\n # function that takes a series and returns the number of unique values it has\n return len(series.unique())\n\n# Then apply in to each of the columns of the DataFrame\ndf.apply(num_unique)",
"_____no_output_____"
]
],
[
[
"It is common to write simple functions like these like **lambda functions** to save space. ",
"_____no_output_____"
]
],
[
[
"df.apply(lambda s: len(s.unique()))",
"_____no_output_____"
]
],
[
[
"### *Series.apply()*\n\nBy calling `.apply()` from a *Series*, it applies the function to **each element** of the *Series* **independently**.\n\nFor example:",
"_____no_output_____"
]
],
[
[
"df['C'].apply(lambda x: x**x)",
"_____no_output_____"
]
],
[
[
"The above line applies the function $f(x) = x^x$ to every element $x$ of `df['C']`.\n\nThis can be used to create **more complicated** filters!\n\n### Advanced filtering with `.apply()`\n\nTo do this, all we have to do is to create a function that returns `bool` values.\n\nFor example, we want to filter `df['B']` so that we keep entries with `30` days. First, we'll create a function that checks if an entry has `30` days or not. ",
"_____no_output_____"
]
],
[
[
"df['B'].apply(lambda x: x.day == 30)",
"_____no_output_____"
]
],
[
[
"The above is equivalent with:\n\n```python\n# Write a function that returns a bool value based on \n# the condition we want to filter the dataframe with\ndef has_30_days(x):\n # returns true if x has 30 days\n return x.day == 30\n\n# Apply the function on column 'B'\ndf['B'].apply(has_30_days)\n```\n\nIf we have created the function, all we have to do is to index the *DataFrame* with the result of the `.apply()`.",
"_____no_output_____"
]
],
[
[
"df[df['B'].apply(lambda x: x.day == 30)]",
"_____no_output_____"
]
],
[
[
"## Dealing with missing data\n\nThis is a very interesting topic, which we will revisit in more detail in a future tutorial.\n\nIn short there are a few easy ways we can quickly deal with missing data. The two main options are:\n\n- Dropping missing data.\n- Filling missing data.\n\nSince *pandas* is built on top of *numpy*, missing data is represented with `np.nan` values. If they aren't, they'll have to be converted to `np.nan`.\n\nLet's first download a sample *DataFrame* and fill it with missing values.",
"_____no_output_____"
]
],
[
[
"url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data' # where to download the data from\ndata = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0,1,2,3]) # load it into a numpy array\ndata[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan # replace some values in random with np.nan\ndata = pd.DataFrame(data, columns=['A', 'B', 'C', 'D']) # load it into a dataframe\ndata.shape",
"_____no_output_____"
]
],
[
[
"This is a $150 \\times 4$ *DataFrame* with several missing values. How can we tell how many and where they are?\n\n### Inspecting missing values\n\nThis can be done with `.isna()` or `.isnull()`. What's the difference between the two? Nothing at all ([here](https://datascience.stackexchange.com/a/37879/34269) is an explanation).\n\n`DataFrame.isna()` checks every value one by one if it is `np.nan` or not. The only thing we have to do is aggregate the resulting *DataFrame*.",
"_____no_output_____"
]
],
[
[
"data.isna().any() # checks if a column has at least one missing value or not",
"_____no_output_____"
],
[
"data.isna().sum() # how many missing values per column",
"_____no_output_____"
],
[
"data.isna().sum() / len(data) * 100 # percentage of values missing per column",
"_____no_output_____"
]
],
[
[
"### Dropping missing values\n\nThere are two ways to drop a missing value:\n\n- Drop its **row**.\n- Drop its **column**.\n\nBoth can be accomplished through `.dropna()`.",
"_____no_output_____"
]
],
[
[
"tmp = data.dropna() # drops rows with missing values\nprint(tmp.shape)\ntmp = data.dropna(axis=1) # drops columns with missing values\nprint(tmp.shape)",
"(131, 4)\n(150, 0)\n"
]
],
[
[
"Note that these operations are **not inplace**! If we wanted to overwrite the original *DataFrame* we'd have to write:\n\n```python\ndata = data.dropna()\n# or\ndata.dropna(inplace=True)\n```\n\nThis method also offers many more parameters for \n\n- dropping rows that have missing values **only in specific columns** (`subset`)\n- dropping rows that have **multiple missing values** (more than a threshold `thresh`)\n- dropping rows (or columns) that have **all their values missing** (`how='all'`)\n\n### Filling missing values\n\nThis process is often referred to as **imputation**. In *pandas* it is done with `.fillna()` and can be accomplished in two ways: either fill the whole *DataFrame* with a single value or fill each column with a single value.\n\n\nThe first is the easiest to implement.",
"_____no_output_____"
]
],
[
[
"tmp = data.fillna(999) # fills any missing value in the DataFrame with 999\nprint('Mean values for the original DataFrame:\\n', data.mean())\nprint('\\nMean values for the imputed DataFrame:\\n', tmp.mean())",
"Mean values for the original DataFrame:\n A 5.846622\nB 3.053793\nC 3.709859\nD 1.180822\ndtype: float64\n\nMean values for the imputed DataFrame:\n A 19.088667\nB 36.252000\nC 56.792000\nD 27.789333\ndtype: float64\n"
]
],
[
[
"The second way is a bit more interesting. We'll first need to create a dictionary (or something equivalent) telling *pandas* which value to use for each column.",
"_____no_output_____"
]
],
[
[
"fill_values = {'A': -999, 'B':0, 'D': 999} # note that we purposely ignored column 'C'\n\ntmp = data.fillna(fill_values)\nprint('Mean values for the original DataFrame:\\n', data.mean())\nprint('\\nMean values for the imputed DataFrame:\\n', tmp.mean())\nprint('\\nNumber of missing values of the original DataFrame:\\n', data.isna().sum())\nprint('\\nNumber of missing values of the imputed DataFrame:\\n',tmp.isna().sum())",
"Mean values for the original DataFrame:\n A 5.846622\nB 3.053793\nC 3.709859\nD 1.180822\ndtype: float64\n\nMean values for the imputed DataFrame:\n A -7.551333\nB 2.952000\nC 3.709859\nD 27.789333\ndtype: float64\n\nNumber of missing values of the original DataFrame:\n A 2\nB 5\nC 8\nD 4\ndtype: int64\n\nNumber of missing values of the imputed DataFrame:\n A 0\nB 0\nC 8\nD 0\ndtype: int64\n"
]
],
[
[
"One interesting thing we can do is impute the missing values based on a statistic. For example, impute each missing value with its column's mean. ",
"_____no_output_____"
]
],
[
[
"tmp = data.fillna(data.mean())\nprint('Mean values for the original DataFrame:\\n', data.mean())\nprint('\\nMean values for the imputed DataFrame:\\n', tmp.mean())",
"Mean values for the original DataFrame:\n A 5.846622\nB 3.053793\nC 3.709859\nD 1.180822\ndtype: float64\n\nMean values for the imputed DataFrame:\n A 5.846622\nB 3.053793\nC 3.709859\nD 1.180822\ndtype: float64\n"
]
],
[
[
"## Encoding data\n\nEncoding is the process of converting columns containing alphanumeric values (`str`) to numeric ones (`int` or `float`).\n\nThis, too, will be covered in more detail in a later tutorial (*why is it necessary? what ways are there? what are the benefits of each?*). However, we'll show two easy ways this can be accomplished through *pandas*.\n\n### Label encoding\n\nThis essentially means mapping each `str` value to an `int` one. One way to do this is to create a dictionary that maps each `str` to an `int` and use the built-in method `.map()`.",
"_____no_output_____"
]
],
[
[
"mapping_dict = {'train': 0, 'test': 1}\ndf['E'].map(mapping_dict) # this is NOT inplace",
"_____no_output_____"
]
],
[
[
"Or we could use `.apply()`.",
"_____no_output_____"
]
],
[
[
"df['E'].apply(lambda x: mapping_dict[x]) # NOT inplace",
"_____no_output_____"
]
],
[
[
"If we wanted to make the operations inplace we could simply write:\n\n```python\nmapping_dict = {'train': 0, 'test': 1}\n\ndf['E'] = df['E'].map(mapping_dict) # using map\n# or\ndf['E'] = df['E'].apply(lambda x: mapping_dict[x]) # using apply\n```\n\n### One-hot encoding\n\nAlso known as **dummy encoding**, this technique is a bit more complicated. To one-hot encode a column, we have to create as many new columns as there are unique values in the original column. Each of those represents one of the unique values. For each entry, we check the original value and set the corresponding new column to $1$, while the rest are set to $0$. An illustration of the process can be seen in the figure below.\n\n\n\nThe good news is that in *pandas* it is easier than it looks!",
"_____no_output_____"
]
],
[
[
"pd.get_dummies(df) # only columns 'E' and 'F' need to be encoded",
"_____no_output_____"
]
],
[
[
"Again, this operation is **not** inplace.\n\n## Pivot tables\n\nPivot tables can provide important insight in the relationship between two or more variables.\n\n*Pandas* actually offers to ways to generate pivot tables, one through a dedicated function `pd.pivot_table()` and one through the *DataFrame* method `.pivot()`. The first is **highly recommended** due to it allowing for the aggregation of duplicate values.",
"_____no_output_____"
]
],
[
[
"df2 = pd.DataFrame({'A': ['foo'] * 6 + ['bar'] * 4,\n 'B': ['one'] * 4 + ['two'] * 2 + ['one'] * 2 + ['two'] * 2,\n 'C': ['small', 'large'] * 5,\n 'D': [1, 2, 2, 2, 3, 3, 4, 5, 6, 7]})\n\ndf2",
"_____no_output_____"
]
],
[
[
"A pivot table requires 3 things:\n\n- `index`: A column so that its values can be set as the **rows** of the pivot table.\n- `columns`: A column so that its values can be set as the **columns** of the pivot table.\n- `values`: A column so that its values can be **aggregated** and placed into the grid defined by the rows and the columns of the pivot table.",
"_____no_output_____"
]
],
[
[
"pd.pivot_table(df2, index='A', columns='B', values='D')",
"_____no_output_____"
]
],
[
[
"The default aggregation function is `np.mean`. How is each position in the grid calculated?\n\nThe first element in the pivot table corresponds to `A == 'bar'` and `B == 'one'`. How many values do we have with this criteria?",
"_____no_output_____"
]
],
[
[
"df2[(df2['A'] == 'bar') & (df2['B'] == 'one')][['D']]",
"_____no_output_____"
]
],
[
[
"We said that by default *pandas* uses `np.mean` as its aggregator, so:",
"_____no_output_____"
]
],
[
[
"df2[(df2['A'] == 'bar') & (df2['B'] == 'one')]['D'].mean()",
"_____no_output_____"
]
],
[
[
"Similarly, the second element in the pivot table has `A == 'bar'` and `B == 'two'`. So its value will be:",
"_____no_output_____"
]
],
[
[
"df2[(df2['A'] == 'bar') & (df2['B'] == 'two')]['D'].mean()",
"_____no_output_____"
]
],
[
[
"Now, what if we want to change the aggregation function to something else, let's say `np.sum`.",
"_____no_output_____"
]
],
[
[
"pd.pivot_table(df2, index='A', columns='B', values='D', aggfunc=np.sum)",
"_____no_output_____"
]
],
[
[
"This simply sums the values of `'D'` that correspond to each position in the pivot table.\n\nAnother interesting choice for an aggregator is `len`. This will **count** the number of values in each position of the grid **instead of aggregating them**. This means the `values` argument is irrelevant when using `aggfunc=len`. ",
"_____no_output_____"
]
],
[
[
"pd.pivot_table(df2, index='A', columns='B', values='D', aggfunc=len)",
"_____no_output_____"
]
],
[
[
"Creating custom functions for aggregation is also an option. For instance if we want to count the number of **unique values** per position:",
"_____no_output_____"
]
],
[
[
"pd.pivot_table(df2, index='A', columns='B', values='D', aggfunc=lambda x: len(x.unique()))",
"_____no_output_____"
]
],
[
[
"Multi-index pivot tables are also an option but we won't go into any more detail.",
"_____no_output_____"
]
],
[
[
"pd.pivot_table(df2, index=['A', 'B'], columns='C', values='D', aggfunc=np.sum)",
"_____no_output_____"
]
],
[
[
"## Merging DataFrames\n\nThis is the action of combining two or more *DataFrames* into one. *Pandas* offers multiple ways of performing such a merger. Let's first create two *DataFrames* that share **only some** of their rows and columns. ",
"_____no_output_____"
]
],
[
[
"df3 = pd.DataFrame({'A': ['df3'] * 4,\n 'B': ['df3'] * 4,\n 'C': ['df3'] * 4,\n 'D': ['df3'] * 4})\ndf3",
"_____no_output_____"
],
[
"df4 = pd.DataFrame({'B': ['df4'] * 4,\n 'D': ['df4'] * 4,\n 'F': ['df4'] * 4}, index=[2, 3, 6, 7])\ndf4",
"_____no_output_____"
]
],
[
[
"`df3` and `df4` have only two columns and two rows in common. \n\n### Concatenation\n\nConcatenating these two *DataFrames* is the simplest option and can be performed with `pd.concat()`. As we saw in the previous tutorial, there are two ways we can perform the concatenation:\n\n- along the **rows** (`axis=0`) which would produce a *DataFrame* with $4 + 4 = 8$ rows\n- along its **columns** (`axis=1`) which would produce a *DataFrame* with $4 + 3 = 7$ columns\n\nLet's try the first.",
"_____no_output_____"
]
],
[
[
"pd.concat([df3, df4], sort=False)",
"_____no_output_____"
]
],
[
[
"This concatenation did append the rows of the second *DataFrame* under the first one, but the columns are out of alignment. Why is this?\n\nThis happens because *pandas* used the names of the columns to identify which columns to join. So `df4['B']` went under `df3['B']` and `df4['D']` went under `df3['D']`, but the rest of the columns don't match. The way *pandas* solved it is that it added column `'F'` to `df3` and columns `'A'` and `'C'` to `df4` and filled them with `nan` values. Then it performed the merger as if both *DataFrames* were $4 \\times 5$. This type of merger is called an **outer join** and it is the default for `pd.concat()`. \n\nAlso note that the rows with labels `2` and `3` are present two times in the *DataFrame*. \n\nIn contrast an **inner join** would only keep the columns that exist in **both** *DataFrames* and discard the rest. ",
"_____no_output_____"
]
],
[
[
"pd.concat([df3, df4], join='inner', sort=False)",
"_____no_output_____"
]
],
[
[
"The same things can be said about concatenating along the columns. ",
"_____no_output_____"
]
],
[
[
"pd.concat([df3, df4], axis=1, sort=False)",
"_____no_output_____"
]
],
[
[
"Again, the rows that didn't exist (i.e. `6` and `7` in `df3` and `0` and `1` in `df4`) were created, the columns now have duplicate names (i.e. `'B'` and `'D'` appear twice) and all non-existing values were set to `nan`.\n\nAn inner join would look like this:",
"_____no_output_____"
]
],
[
[
"pd.concat([df3, df4], join='inner', axis=1, sort=False)",
"_____no_output_____"
]
],
[
[
"What if we just wanted to concatenate the *DataFrames*, though... like we did in *numpy* (i.e. join rows regardless their name). To do this we'd have to change the labels of the rows of the `df4` to match those of `df3`.",
"_____no_output_____"
]
],
[
[
"tmp = df4.copy() # create a temporary DataFrame so that we don't overwrite df4\ntmp.index = df3.index # change the index of df4 so that it's identical to df3\npd.concat([df3, tmp], axis=1, sort=False)",
"_____no_output_____"
]
],
[
[
"### SQL-type joins\n\nAs we might have assumed from the previous step, *pandas* supports SQL-type joins.\n\nThe merger is performed on specific columns in both *DataFrames* (referred to as *keys*) or on the row labels (like we did before). \n\nThere are four types of joins:\n\n- **outer**, which, as we saw before, uses the **union of the keys** of the two *DataFrames*. \nSo the rows of the merger will be the rows that exist in both *DataFrames* (i.e. `2` and `3`), the rows that exist only in the first *DataFrame* (i.e. `0` and `1`) and the rows that exist only in the second *DataFrame* (i.e. `6` and `7`).\n- **inner**, which, like before, uses the **intersection of the keys** of the two *DataFrames*. \nHere the rows of the merger are only those existing in both *DataFrames* (i.e. `2` and `3`).\n- **left**, which only keeps the keys of the **first** *DataFrame*. \nThe rows will be the keys of the first *DataFrame* (i.e. `0`, `1`, `2` and `3`).\n- **right**, which only keeps the keys of the **second** *DataFrame*. \nThe rows will be the keys of the second *DataFrame* (i.e. `2`, `3`, `6` and `7`).\n\nIn all cases, by default, **all columns will be kept**. They will be, however, renamed if necessary so that there aren't any duplicate column names.",
"_____no_output_____"
]
],
[
[
"pd.merge(df3, df4, how='outer', left_index=True, right_index=True) # the two last parameters instruct pandas \n # to use the rows labels as the keys",
"_____no_output_____"
],
[
"pd.merge(df3, df4, how='inner', left_index=True, right_index=True)",
"_____no_output_____"
],
[
"pd.merge(df3, df4, how='left', left_index=True, right_index=True)",
"_____no_output_____"
],
[
"pd.merge(df3, df4, how='right', left_index=True, right_index=True)",
"_____no_output_____"
]
],
[
[
"### \"Group By\" process\n\nBy “group by” we are referring to a process involving one or more of the following steps:\n\n- **Splitting** the data into groups based on some criteria.\n- **Applying** a function to each group independently.\n - **Aggregation**: compute a statistical summary of each group.\n - **Transformation**: perform an operation that alters the values in one or more groups.\n - **Filtration**: disregard some groups based on a group-wise computation.\n- **Combining** the results into a data structure.\n\nWe'll use `df2` to illustrate this process.",
"_____no_output_____"
]
],
[
[
"df2",
"_____no_output_____"
]
],
[
[
"### Splitting the data\n\nThis step **partitions** the data into **subsets**, based on the values of a column.",
"_____no_output_____"
]
],
[
[
"grouped = df2.groupby(['A'])",
"_____no_output_____"
]
],
[
[
"Since `df2['A']` can take only two values (`'foo'` and `'bar'`), this is roughly equivalent to: ",
"_____no_output_____"
]
],
[
[
"df2[df2['A'] == 'foo']",
"_____no_output_____"
],
[
"df2[df2['A'] == 'bar']",
"_____no_output_____"
]
],
[
[
"However, groupby **doesn't** actually perform the partitioning, it will do so when required in the next steps.\n\nHow can we access the groups?",
"_____no_output_____"
]
],
[
[
"grouped.groups",
"_____no_output_____"
]
],
[
[
"This returns a dictionary with the unique values of `'A'` as its keys and the row indices that correspond to each key as its values.\n\nIf we know which key we want to use we can manually partition the data.",
"_____no_output_____"
]
],
[
[
"grouped.get_group('foo')",
"_____no_output_____"
]
],
[
[
"### Applying functions\n\nThis step allows for the application of a function to each group independently. There are many types of operations we can perform here.\n\n#### Aggregation\n\nThis involves generating a descriptive statistic for each of the groups.",
"_____no_output_____"
]
],
[
[
"grouped.agg(np.mean) # the mean value of each column (only relevant for one column)",
"_____no_output_____"
],
[
"grouped.agg(len) # how many samples does each group have",
"_____no_output_____"
]
],
[
[
"We can even select a **different** aggregation function for each column.",
"_____no_output_____"
]
],
[
[
"grouped.agg({'B': len, # number of values in each group\n 'C': lambda x: len(x.unique()), # unique values in each group\n 'D': np.sum}) # sum the values of each group",
"_____no_output_____"
]
],
[
[
"#### Transformation\n\nThis involves changing some values in the data (each group's values are changed in a different manner). For example:",
"_____no_output_____"
]
],
[
[
"grouped.transform(lambda x: (x - x.min()) / (x.max() - x.min())) # normalize values in each group\ngrouped.transform(lambda x: (x - x.mean()) / x.std()) # standardize values in each group\ngrouped.transform(lambda x: x.fillna(x.mean())) # replace nan values with the mean of each group",
"_____no_output_____"
]
],
[
[
"All the above operations are relevant only for column `'D'` (since it is the only containing numeric values) and are **not** performed inplace. \n\n#### Filtering\n\nThis operation filters groups based on some condition. ",
"_____no_output_____"
]
],
[
[
"grouped.filter(lambda x: x['D'].sum() > 15) # keep only groups that have a sum of values in column 'D' greater than 15",
"_____no_output_____"
]
],
[
[
"#### Regular `.apply()`\n\nAll of the above three effects can be accomplished through `.apply()`. ",
"_____no_output_____"
]
],
[
[
"# Aggregation:\ngrouped['D'].apply(np.sum) # same as: grouped.apply(lambda x: x['D'].sum())\n\n# Transformation:\ngrouped['D'].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n# equivalent with: grouped.apply(lambda x: (x['D'] - x['D'].min()) / (x['D'].max() - x['D'].min()))\n\n# Filtering:\ngrouped.apply(lambda x: x if x['D'].sum() > 15 else None)",
"_____no_output_____"
]
],
[
[
"The \"group by\" process can be done on multiple indices. However, we won't go more details about this. ",
"_____no_output_____"
]
],
[
[
"df2.groupby(['A','B']).sum() # roughly equivalent to the pivot_table we did previously",
"_____no_output_____"
]
],
[
[
"## Shape manipulation\n\nUnlike *numpy* arrays, *DataFrames* usually aren't made to be reshaped. Nevertheless, *pandas* offers support for stacking and unstacking. ",
"_____no_output_____"
]
],
[
[
"# The stack() method “compresses” a level in the DataFrame’s columns.\nstk = df.stack()\nprint(stk)",
"0 B 2019-03-30 00:00:00\n C 0\n D 3\n E test\n F foo\n G 10\n1 B 2019-03-30 00:00:00\n C 1\n D 3\n E train\n F foo\n G 22\n3 B 2019-03-30 00:00:00\n C 3\n D 3\n E train\n F foo\n G -8\n4 B 2019-03-31 00:00:00\n C 4\n D -3\n E train\n F bar\n G 13\ndtype: object\n"
],
[
"# The inverse operation is unstack()\nstk.unstack()",
"_____no_output_____"
]
],
[
[
"## Input / output operations\n\nThe most common format associated with *DataFrames* is csv.\n\n### CSV\n \nWriting a *DataFrame* to a csv file can be accomplished with a single line.",
"_____no_output_____"
]
],
[
[
"df.to_csv('tmp/my_dataframe.csv') # writes df to file 'my_dataframe.csv' in folder 'tmp'",
"_____no_output_____"
]
],
[
[
"`DataFrame.to_csv()` by default stores **both row and column labels**. Usually we don't want to write the row labels and sometimes we might not even want to write the column labels. This can be accomplished with the following arguments: ",
"_____no_output_____"
]
],
[
[
"df.to_csv('tmp/my_dataframe.csv', header=False, index=False)",
"_____no_output_____"
]
],
[
[
"To load a csv into a *DataFrame* we can use `pd.read_csv()`.",
"_____no_output_____"
]
],
[
[
"tmp = pd.read_csv('tmp/my_dataframe.csv')\ntmp",
"_____no_output_____"
]
],
[
[
"As you can see, by default, *pandas* uses the first line of the csv as its column names. If this isn't desirable, we can use the `header` argument. ",
"_____no_output_____"
]
],
[
[
"tmp = pd.read_csv('tmp/my_dataframe.csv', header=None)\ntmp",
"_____no_output_____"
]
],
[
[
"### MS Excel\n\nPandas can read and write to excel files through two simple functions: `pd.read_excel(file.xlsx)` and `DataFrame.to_excel(file.xlsx)`. Note that this requires an extra library (`xlrd`)\n\n### Other options \nOther options include pickle, json, SQL databases, clipboard, URLs and even integration with the google analytics API.\n\n## Exploratory Data Analysis\n\nWe've only scratched the surface of the capabilities of the *pandas* library. In order to get a better understanding of the library and how it's used, we'll attempt to perform an exploratory data analysis on the adult income dataset.\n\nWhen doing Exploratory Data Analysis (EDA), we want to observe and summarize our data through descriptive statistics so that we have a better understanding of them. ",
"_____no_output_____"
]
],
[
[
"url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'\ndata = pd.read_csv(url, header=None)\ndata.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status',\n 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss',\n 'hours-per-week', 'native-country', 'income']",
"_____no_output_____"
]
],
[
[
"The first thing we want to do is inspect the shape of the *DataFrame*.",
"_____no_output_____"
]
],
[
[
"data.shape",
"_____no_output_____"
]
],
[
[
"Our data contains 32561 rows and 15 columns. If we take a look at the [description](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names) of the dataset we see that it contains both continuous valued variables (age, working hours etc.) and categorical ones (sex, relationship etc.). When performing data analysis it is important to know what each variable represents.\n\nThe next thing we'll do is to look at a sample of the dataset.",
"_____no_output_____"
]
],
[
[
"data.head()",
"_____no_output_____"
]
],
[
[
"For each variable we'll see what values it can take.",
"_____no_output_____"
]
],
[
[
"print('minimum:', data['age'].min())\nprint('maximum:', data['age'].max())\nprint('mean: ', data['age'].mean())",
"minimum: 17\nmaximum: 90\nmean: 38.58164675532078\n"
]
],
[
[
"`age` is a numeric variable that has a minimum value of $17$ and a max of $90$. While we can run any descriptive statistics on this variable, to have a complete perspective we must visualize it (see a later tutorial on how to do so). ",
"_____no_output_____"
]
],
[
[
"data['workclass'].value_counts()",
"_____no_output_____"
]
],
[
[
"Here we find our first occurrence of missing values. In this dataset, these are represented by question marks (`?`).",
"_____no_output_____"
]
],
[
[
"print('minimum:', data['fnlwgt'].min())\nprint('maximum:', data['fnlwgt'].max())\nprint('mean: ', data['fnlwgt'].mean())",
"minimum: 12285\nmaximum: 1484705\nmean: 189778.36651208502\n"
]
],
[
[
"This variable is continuous-valued and represents the demographics of the individual. \n\n>Description of fnlwgt (final weight)\n\n> The weights on the CPS files are controlled to independent estimates of the\n civilian noninstitutional population of the US. These are prepared monthly\n for us by Population Division here at the Census Bureau. We use 3 sets of\n controls.\n These are:\n 1. A single cell estimate of the population 16+ for each state.\n 2. Controls for Hispanic Origin by age and sex.\n 3. Controls by Race, age and sex.\n \n> We use all three sets of controls in our weighting program and \"rake\" through\n them 6 times so that by the end we come back to all the controls we used.\n \n> The term estimate refers to population totals derived from CPS by creating\n \"weighted tallies\" of any specified socio-economic characteristics of the\n population.\n \n> People with similar demographic characteristics should have\n similar weights. There is one important caveat to remember\n about this statement. That is that since the CPS sample is\n actually a collection of 51 state samples, each with its own\n probability of selection, the statement only applies within\n state.",
"_____no_output_____"
]
],
[
[
"data['education'].value_counts()",
"_____no_output_____"
],
[
"data['education-num'].value_counts()",
"_____no_output_____"
]
],
[
[
"The latter is simply an encoded version of the first.",
"_____no_output_____"
]
],
[
[
"data['marital-status'].value_counts()",
"_____no_output_____"
],
[
"data['occupation'].value_counts()",
"_____no_output_____"
],
[
"data['relationship'].value_counts()",
"_____no_output_____"
],
[
"data['race'].value_counts()",
"_____no_output_____"
],
[
"data['sex'].value_counts()",
"_____no_output_____"
],
[
"print(len(data[data['capital-gain'] == 0]))\nprint(len(data[data['capital-gain'] != 0]))",
"29849\n2712\n"
],
[
"print(len(data[data['capital-loss'] == 0]))\nprint(len(data[data['capital-loss'] != 0]))",
"31042\n1519\n"
],
[
"print('minimum:', data['hours-per-week'].min())\nprint('maximum:', data['hours-per-week'].max())\nprint('mean: ', data['hours-per-week'].mean())",
"minimum: 1\nmaximum: 99\nmean: 40.437455852092995\n"
],
[
"data['native-country'].value_counts()",
"_____no_output_____"
]
],
[
[
"### Data Preparation\n\nNext, we'll look at several ways we might have to manipulate our data, including data cleaning, imputing and transforming. \n\nBecause the unknown values are represented as question marks in this dataset, we need to handle them. \n\n#### Example: fill the occupation with the most frequent element",
"_____no_output_____"
]
],
[
[
"most_freq = data['occupation'].mode()[0] # find the most common element\n\ndata['occupation'] = data['occupation'].apply(lambda x: most_freq if x == ' ?' else x)\n# the line above first keeps just the column that represents the occupations from the dataframe\n# then it applies a function which checks if those values are question marks and changes them to the most common element\n# finally it replaces the original occupations with the new ones\n\ndata['occupation'].value_counts()",
"_____no_output_____"
]
],
[
[
"Notice that all elements are preceded by a whitespace? Can we remove it and clean our data?",
"_____no_output_____"
]
],
[
[
"print('Before cleaning: `{}`'.format(data.occupation[0]))\ndata.occupation = data['occupation'].apply(lambda x: x.strip())\nprint('After cleaning: `{}`'.format(data.occupation[0]))",
"Before cleaning: ` Adm-clerical`\nAfter cleaning: `Adm-clerical`\n"
]
],
[
[
"### Exercise 2\n\nFill the missing values of the DataFrame's `native-country` column with whatever strategy you wish.\n\n### Solution\n\nThis time we'll drop the rows containing missing values.",
"_____no_output_____"
]
],
[
[
"print('DataFrame length:', len(data))\nprint('missing: ', len(data[data['native-country'] == ' ?']))\n\ndata = data.drop(data[data['native-country'] == ' ?'].index)\n\nprint('DataFrame length:', len(data))\nprint('missing: ', len(data[data['native-country'] == ' ?']))",
"DataFrame length: 32561\nmissing: 583\nDataFrame length: 31978\nmissing: 0\n"
]
],
[
[
"Finally, let's try to **encode** our data. \n \nTo illustrate how they would be performed in *pandas*: We will first encode the `education` variable preserving its sequential nature. Next, we will perform a custom encoding on the `marital-status` variable so that we keep only two categories (i.e. currently married and not). Finally, we will one-hot encode all the remaining categorical variables in the dataset. \n\nFirst, `education`.",
"_____no_output_____"
]
],
[
[
"data['education'] = data['education'].apply(lambda x: x.strip()) # clean whitespace on category\n\n# Create a dictionary mapping the categories to their encodings.\n# This has to be done manually as the exact sequence has to be taken into consideration.\nmappings = {'Preschool': 1, '1st-4th': 2, '5th-6th': 3, '7th-8th': 4, '9th': 5, '10th': 6,\n '11th': 7, '12th': 8, 'HS-grad': 9, 'Some-college': 10, 'Assoc-voc': 11, 'Assoc-acdm':12,\n 'Bachelors': 13, 'Masters': 14, 'Prof-school': 15, 'Doctorate': 16}\n\n\ndata['education'] = data['education'].map(mappings) # encode categorical variable with custom mapping\n# another way to do this would be: data.replace(mappings, inplace=True) \n# another way this could be done would be through data.education.astype('category'),\n# this however would prevent us from choosing the mapping scheme\ndata['education'].value_counts()",
"_____no_output_____"
]
],
[
[
"Next, `marital-status`.",
"_____no_output_____"
]
],
[
[
"data['marital-status'] = np.where(data[\"marital-status\"] == ' Married-civ-spouse', 1, 0)\n# the above function replaces ' Married-civ-spouse' with 1 and all the rest with 0\n\ndata['marital-status'].value_counts()",
"_____no_output_____"
]
],
[
[
"After this, we'll one-hot encode all rest categorical variables. Note that we haven't dealt with all missing values yet (in a real scenario we should). ",
"_____no_output_____"
]
],
[
[
"print('Before one-hot encoding:', data.shape)\n\ndata = pd.get_dummies(data) # one-hot encode all categorical variables\n\nprint('After one-hot encoding: ', data.shape)",
"Before one-hot encoding: (31978, 15)\nAfter one-hot encoding: (31978, 87)\n"
]
],
[
[
"Finally, we'll see how we can split numerical values to separate bins, in order to convert them to categorical. This time around we won't replace the numerical data but create a new variable.",
"_____no_output_____"
]
],
[
[
"data['age_categories'] = pd.cut(data.age, 3, labels=['young', 'middle aged', 'old'])\ndata['age_categories'].value_counts()",
"_____no_output_____"
]
],
[
[
"When binning, we would usually want each bin to have the same number of samples. In order to do this we need to manually find where to cut each bin and input the *cut points* instead of the number of bins we want. But we'll leave that up to you!",
"_____no_output_____"
],
[
"## Bonus material:\n\n### Data wrangling:\n\n- [extended data wrangling tutorial](http://nbviewer.jupyter.org/github/fonnesbeck/Bios8366/blob/master/notebooks/Section2_2-Data-Wrangling-with-Pandas.ipynb)\n\n### Dealing with inconsistent text data\n\nOne of the most common problems when dealing with text data is inconsistency. This may occur due to spelling errors, differences when multiple people perform the data entry, etc.",
"_____no_output_____"
]
],
[
[
"df6 = pd.DataFrame({'fname':['George', 'george ', 'GEORGIOS', 'Giorgos', ' Peter', 'Petet'],\n 'sname':['Papadopoulos', 'alexakos ', 'Georgiou', 'ANTONOPOULOS', ' Anastasiou', 'Κ'],\n 'age': [46, 34, 75, 24, 54, 33]})\ndf6",
"_____no_output_____"
]
],
[
[
"When looking at the example above, several inconsistencies become apparent. The first thing we want to do when dealing with strings is to convert them all to lowercase (or uppercase depending on preference) and remove preceding and succeeding whitespace.",
"_____no_output_____"
]
],
[
[
"def clean_text(text):\n text = text.strip() # strip whitespace\n text = text.lower() # convert to lowercase\n return text\n\ndf6['fname'] = df6['fname'].apply(clean_text)\ndf6['sname'] = df6['sname'].apply(clean_text)\n\n# same could be done through a lambda function\n# df.fname.apply(lambda x: x.strip().lower())\n\ndf6",
"_____no_output_____"
]
],
[
[
"Another problem originates from the way each name was entered. There are two ways to deal with this, one is to manually look and change errors, and the other is to compare the strings to find differences.\n\nWe are going to try the second, through the python package [fuzzywuzzy](https://github.com/seatgeek/fuzzywuzzy).",
"_____no_output_____"
]
],
[
[
"from fuzzywuzzy import process\nprocess.extract('george', df6['fname'], limit=None)",
"c:\\users\\thano\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\fuzzywuzzy\\fuzz.py:35: UserWarning: Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning\n warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')\n"
]
],
[
[
"*Fuzzywuzzy* compares strings and outputs a score depending on how close they are. Let's replace the close ones:",
"_____no_output_____"
]
],
[
[
"def replace_matches_in_column(df, column, target_string, min_ratio=50):\n \n # find unique elements in specified column\n strings = df[column].unique()\n \n # see how close these elements are to the target string\n matches = process.extract(target_string, strings, limit=None)\n\n # keep only the closest ones\n close_matches = [matches[0] for matches in matches if matches[1] >= min_ratio]\n\n # get the rows of all the close matches in our dataframe\n rows_with_matches = df[column].isin(close_matches)\n\n # replace all rows with close matches with the input matches \n df.loc[rows_with_matches, column] = target_string",
"_____no_output_____"
],
[
"replace_matches_in_column(df6, 'fname', 'george')\nreplace_matches_in_column(df6, 'fname', 'peter')\n\ndf6",
"_____no_output_____"
]
],
[
[
"There, all clean! Note that the `min_ratio` we used is **very low**. We usually require much closer matches in order to replace.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e79ac2029eebb2177f260983b6e8441d58b47a6d | 219,253 | ipynb | Jupyter Notebook | preprocessing_pipeline.ipynb | toskn/diploma | 6c44c5c043f2fe3b605ab7e74b1fb355686f97af | [
"MIT"
] | 1 | 2021-11-06T08:18:00.000Z | 2021-11-06T08:18:00.000Z | preprocessing_pipeline.ipynb | toskn/thesis | 6c44c5c043f2fe3b605ab7e74b1fb355686f97af | [
"MIT"
] | null | null | null | preprocessing_pipeline.ipynb | toskn/thesis | 6c44c5c043f2fe3b605ab7e74b1fb355686f97af | [
"MIT"
] | null | null | null | 66.682786 | 39,564 | 0.708036 | [
[
[
"# Обработка и разбор данных от Semantic Hub",
"_____no_output_____"
],
[
"Клетки, которые выводят информацию о данных предоставленных Semantic Hub, были очищены",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os\nfrom tqdm import tqdm\nimport re\nimport json\nfrom collections import Counter\nimport ast\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport nltk",
"_____no_output_____"
]
],
[
[
"## Заходим в папку с полученными данными и получаем список файлов",
"_____no_output_____"
]
],
[
[
"# путь к папке со всеми файлами\npath = '/Users/egor/Desktop/SemanticHub/diploma/relatives_data/jsons'",
"_____no_output_____"
],
[
"list_of_files = os.listdir(path) # список названий файлов",
"_____no_output_____"
]
],
[
[
"## Делаем список json объектов, с которыми удобно работать",
"_____no_output_____"
]
],
[
[
"list_full_file_json = [] # список, в котором каждый элемент - содержимое одного файла\n\n# цикл для прохода по всем файлам и сбора их содержимого\nfor filename in tqdm(list_of_files):\n # абсолютный путь к единичному файлу\n path_single = path + '/' + filename\n # открываем\n with open(path_single) as file:\n file = json.load(file)\n list_full_file_json.append(file)",
"100%|██████████| 386254/386254 [02:53<00:00, 2220.16it/s]\n"
],
[
"# пример удобной работы\nlist_full_file_json[90564]['text']",
"_____no_output_____"
]
],
[
[
"## Подготовим поле *text* к обработке",
"_____no_output_____"
]
],
[
[
"# для этого удалим все, кроме текста самого сообщения\n# вообще можно будет пытаться еще пол вытягивать и всякие другие данные типа даты, города, имени\n\n# 1\n\n# текст окружен двумя пробелами странной длины, это нам поможет\npattern = re.compile(' (.*) ')\n\n# это будет список номеров тех сообщений, которые не прошли по первому паттерну\ni = 0\nlist_err = []\n\nfor message in tqdm(list_full_file_json):\n try:\n message['text'] = pattern.search(message['text']).group(1)\n i += 1\n except AttributeError:\n list_err.append(i)\n i += 1\n",
"100%|██████████| 386254/386254 [00:10<00:00, 37633.50it/s]\n"
],
[
"list_err[:10]",
"_____no_output_____"
],
[
"# 2\n\n# ненужный текст начиается с хмл, а заканчивается годом, после нее идет само сообщение\n# возможно такое, что в сообщении есть упоминание года, поэтому оно будет урезано, но иначе работать сложно, \n# потому что встречается много вариантов времени и других вещей перед годом:\n# наличие/отсутсвие часа, секунд, UTC и т.п.\n# \n# upd: посмотрел глазами, некрасивых вариантов особо не заметил\npattern = re.compile('xml.*\\d\\d\\d\\d(.*)')\n\nfor number in tqdm(list_err):\n if pattern.search(list_full_file_json[number]['text']).group(1) != ' ':\n list_full_file_json[number]['text'] = pattern.search(list_full_file_json[number]['text']).group(1)\n ",
"100%|██████████| 3269/3269 [00:00<00:00, 12722.12it/s]\n"
],
[
"for number in list_err[50:60]:\n print(list_full_file_json[number]['text'])\n print('//////////////////////////////////////')",
"_____no_output_____"
],
[
"# уберем форумные смайлики из сообщений\npattern = re.compile(':[a-zA-Z]+:')\n\nfor message in tqdm(list_full_file_json):\n message['text'] = pattern.sub('', message['text'])\n",
"100%|██████████| 386254/386254 [00:05<00:00, 70739.81it/s] \n"
],
[
"# заменим множественные пробелы на единичные,\n# уберем пробелы в начале и в конце предложения\n\n# паттерн для поиска пробелов\npattern = re.compile('\\s+')\n\nfor message in tqdm(list_full_file_json):\n message['text'] = pattern.sub(' ', message['text'])\n # убираю пробел в начале\n if message['text'][0] == ' ':\n message['text'] = message['text'][1:]\n # убираю пробел в конце \n if message['text'][-1:] == ' ':\n message['text'] = message['text'][:len(message['text'])-1]\n # добавляю знак препинания в конце, если его нет. \n if message['text'][-1:] != '.' and message['text'][-1:] != '!' and message['text'][-1:] != '?' and message['text'][-1:] != '…':\n message['text'] = message['text'] + '.'\n \n \n ",
"100%|██████████| 386254/386254 [00:29<00:00, 13170.82it/s]\n"
],
[
"list_full_file_json[90564]['text']",
"_____no_output_____"
],
[
"list_full_file_json[0]['annotation_sets']",
"_____no_output_____"
]
],
[
[
"Поле **text** подготовлено, можно провести количественный анализ и построить разные визуализации",
"_____no_output_____"
],
[
"## Анализ текстового содержания – поле *text*",
"_____no_output_____"
],
[
"Для проведения анализа заполним датафрейм пандас важными для анализа характеристиками:\n+ количество слов в каждом документе\n+ количество предложений в каждом документе\n+ количество цепочек кореференции в каждом документе\n+ количество выделенных слов в каждом документе\n\n\n\n+ количество цепочек кореференции всего\n+ количество выделенных слов всего\n+ количество документов всего",
"_____no_output_____"
],
[
"### Распределение количества слов в каждом документе",
"_____no_output_____"
]
],
[
[
"# список количеств слов\nwords_list = []\n# паттерн для деления по пробелам/переносам\npattern = re.compile('\\s+')\n\nfor message in tqdm(list_full_file_json):\n words_message = len(pattern.split(message['text']))\n words_list.append(words_message)",
"100%|██████████| 386254/386254 [00:25<00:00, 15195.45it/s]\n"
],
[
"# посмотрим на самые частые количества слов на одно сообщение\nc = Counter(words_list).most_common(10)\nc",
"_____no_output_____"
],
[
"# построим по полученным данным нормализованную гистограмму\nc = Counter(words_list)\nkeys = list(c.keys())\nvalues = list(c.values())\n\n#n, bins, patches = sns.histplot(x=keys, weights=values, discrete=True, , bins=90, facecolor='#2ab0ff', edgecolor='#e0e0e0', linewidth=0.5, alpha=0.7)\nn, bins, patches = plt.hist(x = keys, weights=values, bins=np.arange(len(keys))-0.5, facecolor='#2ab0ff', alpha=0.9)\nn = n.astype('int')\n\nfor i in range(len(patches)):\n patches[i].set_facecolor(plt.cm.viridis(n[i]/max(n)))\n\n#plt.style.use('seaborn-whitegrid')\n\nplt.xticks(np.arange(min(keys), max(keys)+1, 30.0))\nplt.xlim(0, 700)\n\nplt.xlabel('Words', fontsize=20)\nplt.ylabel('Documents', fontsize=20)\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n#fig.savefig('test2png.png', dpi=100)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Распределение количества предложений в каждом документе",
"_____no_output_____"
]
],
[
[
"# список количеств слов\nsentences_list = []\n# паттерн для деления на предложения, объяснение:(https://regex101.com/r/he9d1P/1)\n# почти всегда делит правильно, в предлоежниях с именем и отчеством через точку ошибается на +1, что\n# для выборки в 400к незначительно\npattern = re.compile('.*?(\\.|\\?|\\!|…)(?= *[А-Я]|$)')\n\nfor message in tqdm(list_full_file_json):\n sentences_message = len(pattern.findall(message['text']))\n sentences_list.append(sentences_message)",
"100%|██████████| 386254/386254 [00:13<00:00, 29318.50it/s]\n"
],
[
"# посмотрим на самые частые количества предложений на одно сообщение\nc = Counter(sentences_list).most_common(15)\nc",
"_____no_output_____"
],
[
"# построим по полученным данным нормализованную гистограмму\nc = Counter(sentences_list)\nkeys = list(c.keys())\nvalues = list(c.values())\n\n#n, bins, patches = sns.histplot(x=keys, weights=values, discrete=True, , bins=90, facecolor='#2ab0ff', edgecolor='#e0e0e0', linewidth=0.5, alpha=0.7)\nn, bins, patches = plt.hist(x = keys, weights=values, bins=np.arange(len(keys))-0.5, facecolor='#2ab0ff', edgecolor='#e0e0e0', alpha=0.7)\nn = n.astype('int')\n\nfor i in range(len(patches)):\n patches[i].set_facecolor(plt.cm.viridis(n[i]/max(n)))\n\n#plt.style.use('seaborn-whitegrid')\n\nplt.xticks(np.arange(min(keys), max(keys)+1, 3.0))\nplt.xlim(0, 61)\n\npatches[5].set_fc('#FDEE70') # Set color\n\nplt.xlabel('Sentences', fontsize=20)\nplt.ylabel('Documents', fontsize=20)\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n#fig.savefig('test2png.png', dpi=100)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Количество кореференциальных цепочек в каждом документе",
"_____no_output_____"
]
],
[
[
"# скрипт для приведения каждого размеченного для кореференции элемента к JSON виду\n# для удобного обращения к параметрам\nfiles_chain_list = []\n\nfor message in tqdm(list_full_file_json):\n list_coref_ent = message['annotation_sets']['']['annotations']\n message_chain_list = []\n \n for entity in list_coref_ent:\n entity = str(entity)[13:-1] # адхок подрезка формата SH\n entity = ast.literal_eval(entity) # превращаю строку в виде дикта в дикт\n entity = json.dumps(entity) # делаем джсон из дикта\n message_chain_list.append(json.loads(entity)) # парсим джсон в список\n \n files_chain_list.append(message_chain_list) # в этом списке каждый файл идет отдельным списком-джсоном\n \n \n",
"100%|██████████| 386254/386254 [03:32<00:00, 1819.19it/s]\n"
],
[
"# теперь составим список, где каждый элемент будет представлять количество цепочек в документе\nchain_amount_list = []\n\nfor json_item in tqdm(files_chain_list):\n chain_num_list = []\n \n for word in json_item:\n chain_num_list.append(word['antecedent_id'])\n \n # через каунтер приведем все к списку туплов и посчитаем количество туплов\n c = Counter(chain_num_list)\n chain_amount_list.append(len(c))",
"100%|██████████| 386254/386254 [00:09<00:00, 40910.10it/s]\n"
],
[
"c = Counter(chain_amount_list).most_common(15)\nc",
"_____no_output_____"
],
[
"# построим по полученным данным нормализованную гистограмму\nc = Counter(chain_amount_list)\nkeys = list(c.keys())\nvalues = list(c.values())\n\n#n, bins, patches = sns.histplot(x=keys, weights=values, discrete=True, , bins=90, facecolor='#2ab0ff', edgecolor='#e0e0e0', linewidth=0.5, alpha=0.7)\nn, bins, patches = plt.hist(x = keys, weights=values, bins=np.arange(len(keys))-0.5, facecolor='#2ab0ff', edgecolor='#e0e0e0', alpha=0.7)\nn = n.astype('int')\n\nfor i in range(len(patches)):\n patches[i].set_facecolor(plt.cm.viridis(n[i]/max(n)))\n\n#plt.style.use('seaborn-whitegrid')\n\nplt.xticks(np.arange(min(keys), max(keys)+1, 3.0))\nplt.xlim(0, 24)\n\nplt.xlabel('Coreference Chains', fontsize=20)\nplt.ylabel('Documents', fontsize=20)\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n#fig.savefig('test2png.png', dpi=100)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Распределение количества слов, выделенных, как значимые для кореференции. ",
"_____no_output_____"
]
],
[
[
"# сделаем список, каждый элемент которого - количество выделенных сущностей на файл\nchain_entity_count_list = []\n\nfor chain_list in tqdm(files_chain_list):\n file_chain = len(chain_list)\n chain_entity_count_list.append(file_chain)",
"100%|██████████| 386254/386254 [00:00<00:00, 405286.70it/s]\n"
],
[
"# посмотрим на самые частые количества выделенных слов на одно сообщение\nc = Counter(chain_entity_count_list).most_common(10)\nc",
"_____no_output_____"
],
[
"# построим по полученным данным нормализованную гистограмму\nc = Counter(chain_entity_count_list)\nkeys = list(c.keys())\nvalues = list(c.values())\n\n#n, bins, patches = sns.histplot(x=keys, weights=values, discrete=True, , bins=90, facecolor='#2ab0ff', edgecolor='#e0e0e0', linewidth=0.5, alpha=0.7)\nn, bins, patches = plt.hist(x = keys, weights=values, bins=np.arange(len(keys))-0.5, facecolor='#2ab0ff', edgecolor='#e0e0e0', alpha=0.7)\nn = n.astype('int')\n\nfor i in range(len(patches)):\n patches[i].set_facecolor(plt.cm.viridis(n[i]/max(n)))\n\n#plt.style.use('seaborn-whitegrid')\n\nplt.xticks(np.arange(min(keys), max(keys)+1, 3.0))\nplt.xlim(0, 51)\n\nplt.xlabel('Tagged Words', fontsize=20)\nplt.ylabel('Documents', fontsize=20)\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n#fig.savefig('test2png.png', dpi=100)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Общее количество кореференциальных цепочек",
"_____no_output_____"
]
],
[
[
"# список количества цепочек в сообщении\nc = Counter(chain_amount_list).most_common()\n# счетчик цепочек\ncounter = 0\n\nfor pair in tqdm(c):\n counter += pair[0]*pair[1]\ncounter",
"100%|██████████| 64/64 [00:00<00:00, 292413.35it/s]\n"
]
],
[
[
"### Общее количество элементов в кореференциальных цепочках ",
"_____no_output_____"
]
],
[
[
"# список количества выделенных слов в сообщении\nc = Counter(chain_entity_count_list).most_common()\n# счетчик цепочек\ncounter = 0\n\nfor pair in tqdm(c):\n counter += pair[0]*pair[1]\ncounter",
"100%|██████████| 136/136 [00:00<00:00, 245028.07it/s]\n"
]
],
[
[
"### Отношение количества слов в сообщении к количеству цепочек",
"_____no_output_____"
]
],
[
[
"# количество слов по сообщениям уже есть\n# количество цепочек по сообщениям уже есть\n# осталось посчитать количество каждых возможных пар\nword_chain_count_list = []\nfor word_count, chain_count in zip(words_list, chain_amount_list):\n word_chain_count_list.append((word_count, chain_count, ))\nc = Counter(word_chain_count_list)\nc",
"_____no_output_____"
],
[
"ser = pd.Series(list(dict(c).values()), index=pd.MultiIndex.from_tuples(dict(c).keys()))\ndf = ser.unstack().fillna(0)\ndf.shape",
"_____no_output_____"
],
[
"sns.heatmap(df)\n\nplt.xlim(0, 29)\nplt.ylim(0, 295)\n\n\nplt.xlabel('Coreference Chains', fontsize=20)\nplt.ylabel('Words', fontsize=20)\n\nfig = plt.gcf()\nfig.set_size_inches(18.5, 10.5)\n#fig.savefig('test2png.png', dpi=100)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Перевод данных в формат CoNLL",
"_____no_output_____"
],
[
"Имеющиеся переменные:\n+ количество слов\n+ количество предложений\n+ количество кореференциальных цеопчек\n+ количество выделенных слов\n+ список джейсонов аннотаций\n+ список полных джейсонов",
"_____no_output_____"
]
],
[
[
"words_list[:10]",
"_____no_output_____"
],
[
"sentences_list[:10]",
"_____no_output_____"
],
[
"files_chain_list[:1]",
"_____no_output_____"
],
[
"list_full_file_json[8]['text']",
"_____no_output_____"
],
[
"conll_df = pd.DataFrame({'doc_name': [], 'zeros': [], 'word_num_sent': [], 'sent_entry': [], 'pos': [], 'star1': [], 'empty1': [], 'empty2': [],'empty3': [],'empty4': [],'empty5': [], 'star2': [], 'coref': [],})\n\nconll_df.head()",
"_____no_output_____"
],
[
"name_ptt = re.compile('[^/]+$') # а был же уже файл ну ладно и так можно....\nsentence_ptt = re.compile('.*?(?:\\.|\\?|\\!|…)(?= *[А-Я]|$)')",
"_____no_output_____"
],
[
"# создадим все нужные параметры, чтобы потом построчно записывать в датафрейм \ndocument_name = ''\nsentences_list = []\nnames_list = []\nfor document in tqdm(list_full_file_json):\n \n # имя документа\n document_name = name_ptt.search(document['features']['path']).group(0) # nedug-ru_734132-0000.xml\n \n # предложения документа \n document_sentences = sentence_ptt.findall(document['text'])\n sentences_list.append(document_sentences)\n names_list.append(document_name)\nsentences_list[0][0]",
"_____no_output_____"
],
[
"sentences_list[0][0][0]",
"_____no_output_____"
],
[
"tagged_sentences_list = []\nfor sentences_document in tqdm(sentences_list):\n tagged_sentences_doc = []\n for sentence in sentences_document:\n # слова в предложениях + pos\n tokens = nltk.word_tokenize(sentence)\n tagged_sentences_doc.append(nltk.pos_tag(tokens, lang = 'rus'))\n tagged_sentences_list.append(tagged_sentences_doc)",
"100%|██████████| 386254/386254 [2:25:51<00:00, 44.14it/s] \n"
],
[
"tagged_sentences_list[1] #[0]#[0]#[1]",
"_____no_output_____"
],
[
"# осталось только сделать кореференцию\n# получим список неочищенных файлов, чтобы обращаться к кореференциальным\n# цепочкам по их положению в тексте,\n# а не по написанию, таким образом ничего не перепутается\n\nlist_full_file_json_dirty = [] # список, в котором каждый элемент - содержимое одного файла\n\n# цикл для прохода по всем файлам и сбора их содержимого\nfor filename in tqdm(list_of_files):\n # абсолютный путь к единичному файлу\n path_single = path + '/' + filename\n # открываем\n with open(path_single) as file:\n file = json.load(file)\n list_full_file_json_dirty.append(file)",
"100%|██████████| 386254/386254 [12:19<00:00, 522.35it/s] \n"
],
[
"type(list_full_file_json_dirty[1]['annotation_sets']['']['annotations'])#[1]['features']['_string']",
"_____no_output_____"
],
[
"print(len(names_list), len(tagged_sentences_list), len(list_full_file_json_dirty))",
"2 2 2\n"
],
[
"# количество элементов во всех списках одинаковое, поэтому можно сделать zip, а не map\nfor filename, tagged_sentences_list_small, annotations_text in zip(names_list, tagged_sentences_list, list_full_file_json_dirty):\n \n # кореференция\n # отсюда буду получать номера начала и конца кореференциального участника\n annotations = annotations_text['annotation_sets']['']['annotations'].copy()\n # здесь буду искать по номерам начала и конца кусочек текста - на всякий случай\n text = annotations_text['text']\n # список, который станет строчками датафрейма\n list_of_rows = []\n \n for sentences in tagged_sentences_list_small:\n \n \n word_position_counter = 0\n \n for sentence in sentences:\n \n row = []\n \n \n #первый столбец имя файла\n row.append(filename)\n # строка нулей\n row.append(0)\n # номер слова\n row.append(word_position_counter)\n word_position_counter += 1\n # единица предложения\n row.append(sentence[0])\n # pos-tag для единицы\n row.append(sentence[1])\n # строка для дерева\n row.append('-')\n # строка \n row.append('-')\n # строка \n row.append('-')\n # строка \n row.append('-')\n # строка \n row.append('-')\n # строка \n row.append('__')\n # строка \n row.append('*')\n \n # реверс нужен для удаления\n for features_list in reversed(annotations):\n # проходим по списку фичерсов и смотрим: если совпадает sentence[0][0] со _string,\n #то добавляем слову кореф\n string_from_txt = features_list['features']['_string']\n \n if sentence[0] == string_from_txt:\n row.append('(' + str(features_list['features']['antecedent_id']) + ')')\n # удаляем использованный тег кореференции\n annotations.remove(features_list)\n break\n else:\n row.append('-')\n \n list_of_rows.append(row)\n \n conll_df = conll_df.append(pd.DataFrame(list_of_rows, columns=conll_df.columns)) \n \n",
"_____no_output_____"
],
[
"#conll_df = conll_df.iloc[0:0] # лучше дропа ",
"_____no_output_____"
],
[
"conll_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 11932678 entries, 0 to 310\nData columns (total 14 columns):\ndoc_name object\nzeros float64\nword_num_sent float64\nsent_entry object\npos object\nstar1 object\nempty1 object\nempty2 object\nempty3 object\nempty4 object\nempty5 object\nstar2 object\ncoref object\nsentence_number int64\ndtypes: float64(2), int64(1), object(11)\nmemory usage: 1.3+ GB\n"
],
[
"def getSizeOfNestedList(listOfElem):\n ''' Get number of elements in a nested list'''\n count = 0\n # итерируем по списку\n for elem in listOfElem:\n # проверяем тип элемента\n if type(elem) == list: \n # и ркекурсивно вызываем снова эту функцию для подсчета\n count += getSizeOfNestedList(elem)\n else:\n count += 1 \n return count",
"_____no_output_____"
],
[
"getSizeOfNestedList(tagged_sentences_list)",
"_____no_output_____"
]
],
[
[
"# Результаты 180 часов работы программы.\nБыло обработано и внесено в датафрейм 11 932 678 элементов из 64 851 795.\n\nОсталось обработать 52 919 117 элементов\n\nНа настоящем этапе правильно будет остановиться на этом и выделить доступные компьютерные мощности на запуск следующих этапов пайплайна.",
"_____no_output_____"
]
],
[
[
"# нужна функция для добавления колонки номера предложения зфтвф\nsentence_number_list = []\ni = 0\nfor document in tqdm(tagged_sentences_list):\n for sentence in document:\n for word in sentence:\n sentence_number_list.append(i)\n i += 1",
"100%|██████████| 386254/386254 [01:05<00:00, 5930.58it/s]\n"
],
[
"len(sentence_number_list)",
"_____no_output_____"
],
[
"sentence_number_list_180h = sentence_number_list[:11932678]\nlen(sentence_number_list_180h)",
"_____no_output_____"
],
[
"len(conll_df)",
"_____no_output_____"
],
[
"conll_df['sentence_number'] = sentence_number_list_180h",
"_____no_output_____"
],
[
"conll_df['zeros'] = conll_df['zeros'].astype(np.int64)",
"_____no_output_____"
],
[
"conll_df['word_num_sent'] = conll_df['word_num_sent'].astype(np.int64)",
"_____no_output_____"
],
[
"conll_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 11932678 entries, 0 to 310\nData columns (total 14 columns):\ndoc_name object\nzeros int64\nword_num_sent int64\nsent_entry object\npos object\nstar1 object\nempty1 object\nempty2 object\nempty3 object\nempty4 object\nempty5 object\nstar2 object\ncoref object\nsentence_number int64\ndtypes: int64(3), object(11)\nmemory usage: 1.3+ GB\n"
],
[
"conll_df.head()",
"_____no_output_____"
]
],
[
[
"### Заменить тире на два нижних подчеркивания (__)",
"_____no_output_____"
]
],
[
[
"def remean_points(cell):\n cell = \"__\"\n return cell\n\nconll_df.empty5 = conll_df.empty5.apply(remean_points)\nconll_df.head()",
"_____no_output_____"
]
],
[
[
"Замена успешна",
"_____no_output_____"
],
[
"### Заменить двойные кавычки",
"_____no_output_____"
]
],
[
[
"changed_cells = []\ndef remean_points(cell):\n if str(cell) == '\"':\n cell = \"'\"\n changed_cells.append(1)\n else:\n changed_cells.append(0)\n return cell\n\nconll_df.sent_entry.apply(remean_points)",
"_____no_output_____"
],
[
"len(changed_cells)\n[i for i, e in enumerate(changed_cells) if e == 1]",
"_____no_output_____"
]
],
[
[
"Двойных кавычек нет, можно будет спокойно удалять.",
"_____no_output_____"
]
],
[
[
"# правкой тегов имеет смысл заняться после получения результатов на имеющемся тегсете.",
"_____no_output_____"
]
],
[
[
"## Подготовка документа",
"_____no_output_____"
]
],
[
[
"conll_df['join'] = conll_df['doc_name'] + ' ' + conll_df['zeros'].astype(str) + ' ' + conll_df['word_num_sent'].astype(str) + ' ' + conll_df['sent_entry'] + ' ' + conll_df['pos'] + ' ' + conll_df['star1'] + ' ' + conll_df['empty1'] + ' ' + conll_df['empty2'] + ' ' + conll_df['empty3'] + ' ' + conll_df['empty4'] + ' ' + conll_df['empty5'] + ' ' + conll_df['star2'] + ' ' + conll_df['coref']\n\n\n",
"_____no_output_____"
],
[
"for i, g in tqdm(conll_df.groupby('sentence_number')['join']):\n out = g.append(pd.Series({'new':np.nan}))\n out.to_csv('diploma_data_12m.txt', index=False, header=None, mode='a')\n",
"Exception ignored in: <function tqdm.__del__ at 0x7fba6d99a4d0>\nTraceback (most recent call last):\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1145, in __del__\n self.close()\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1274, in close\n if self.last_print_t < self.start_t + self.delay:\nAttributeError: 'tqdm' object has no attribute 'last_print_t'\nException ignored in: <function tqdm.__del__ at 0x7fba6d99a4d0>\nTraceback (most recent call last):\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1145, in __del__\n self.close()\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1274, in close\n if self.last_print_t < self.start_t + self.delay:\nAttributeError: 'tqdm' object has no attribute 'last_print_t'\nException ignored in: <function tqdm.__del__ at 0x7fba6d99a4d0>\nTraceback (most recent call last):\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1145, in __del__\n self.close()\n File \"/Users/egor/opt/anaconda3/lib/python3.7/site-packages/tqdm/std.py\", line 1274, in close\n if self.last_print_t < self.start_t + self.delay:\nAttributeError: 'tqdm' object has no attribute 'last_print_t'\n 0%| | 0/748278 [00:00<?, ?it/s]/Users/egor/opt/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.\n This is separate from the ipykernel package so we can avoid doing imports until\n100%|██████████| 748278/748278 [29:25<00:00, 423.76it/s] \n"
]
],
[
[
"### Деление на подгруппы для заданий берта",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"RANDOM_SEED = 1\nnp.random.seed(RANDOM_SEED)",
"_____no_output_____"
],
[
"!pwd",
"/Users/egor/Desktop/SemanticHub/diploma\r\n"
]
],
[
[
"К сожалению тетрадка упала, поэтому придется загрузить документ сначала.",
"_____no_output_____"
]
],
[
[
"path = 'diploma_data_12m.txt'\nwith open(path) as f:\n contents = f.readlines()\ncontents[:5]",
"_____no_output_____"
],
[
"len(contents)",
"_____no_output_____"
],
[
"# удаление \n# pattern = re.compile('\\\"')\nconll = []\nfor row in tqdm(contents):\n row = re.sub(r'\"','', row)\n conll.append(row)",
"100%|██████████| 12680956/12680956 [00:45<00:00, 281665.04it/s]\n"
],
[
"len(conll)",
"_____no_output_____"
]
],
[
[
"Лишние артефактные кавычки очищены.",
"_____no_output_____"
]
],
[
[
"# файл со всеми вхождениями\n\nwith open('conll_clear_12m.txt', 'w') as f:\n f.writelines(conll)",
"_____no_output_____"
]
],
[
[
"В окошки ниже впишу номера кусков **conll [ ]** для быстрого создания вручную файлов для трейн/тест/дев, таким образом можно будет избежать нарезки предложений. \n\ntrain = 95%, dev = 2.5%, test = 2.5%",
"_____no_output_____"
]
],
[
[
"# файл test\n\n# group 1 паттерна будет имя документа, ноль и пробелы для уверенности, но матч должен и без них работать\npattern = re.compile(r'(.*xml) 0')\n# заполним позицию предыдущего файла первым файлом в списке для удобства.\nprevious_filename = pattern.match(conll[0]).group(1)\n# счетчик документов\ndoc_num = 1\n\nwith open('test.russian_12m_comments.v4_gold_conll', 'w') as f:\n # запишем первую строку\n f.write('#begin document (%d); part 0\\n' % doc_num)\n \n for row in tqdm(conll[:317027]):\n \n # если строка просто разделитель предложений, пропускаем ее\n if row == '\\n':\n f.write(row)\n continue\n \n # получаем имя документа, на строку которого сейчас смотрим \n now_filename = pattern.match(row).group(1)\n \n if now_filename == previous_filename:\n f.write(row)\n previous_filename = now_filename # вроде неопасно, копирование для строк не нужно\n \n else:\n f.write('#end document\\n')\n doc_num += 1\n f.write('#begin document (%d); part 0\\n' % doc_num)\n f.write(row)\n previous_filename = now_filename\n \n f.write('#end document\\n')\n \n \n ",
"100%|██████████| 317027/317027 [00:01<00:00, 218343.40it/s]\n"
],
[
"# файл dev\n\n# group 1 паттерна будет имя документа, ноль и пробелы для уверенности, но матч должен и без них работать\npattern = re.compile(r'(.*xml) 0')\n\n# заполним позицию предыдущего файла первым файлом в списке для удобства,\n# позиция +1 от предыдущей для пропуска \\n\nprevious_filename = pattern.match(conll[317028]).group(1)\n\n# счетчик документов\ndoc_num = 1\n\nwith open('dev.russian_12m_comments.v4_gold_conll', 'w') as f:\n \n # запишем первую строку\n f.write('#begin document (%d); part 0\\n' % doc_num)\n \n for row in tqdm(conll[317028:634212]):\n \n # если строка просто разделитель предложений, пропускаем ее\n if row == '\\n':\n f.write(row)\n continue\n \n # получаем имя документа, на строку которого сейчас смотрим \n now_filename = pattern.match(row).group(1)\n \n if now_filename == previous_filename:\n f.write(row)\n previous_filename = now_filename # вроде неопасно, копирование для строк не нужно\n \n else:\n f.write('#end document\\n')\n doc_num += 1\n f.write('#begin document (%d); part 0\\n' % doc_num)\n f.write(row)\n previous_filename = now_filename\n \n \n ",
"100%|██████████| 317184/317184 [00:01<00:00, 252614.10it/s]\n"
],
[
"# файл train\n \n# group 1 паттерна будет имя документа, ноль и пробелы для уверенности, но матч должен и без них работать\npattern = re.compile(r'(.*xml) 0')\n\n# заполним позицию предыдущего файла первым файлом в списке для удобства.\nprevious_filename = pattern.match(conll[634213]).group(1)\n\n# счетчик документов\ndoc_num = 1\n\nwith open('train.russian_12m_comments.v4_gold_conll', 'w') as f:\n \n # запишем первую строку\n f.write('#begin document (%d); part 0\\n' % doc_num)\n \n for row in tqdm(conll[634213:]):\n \n # если строка просто разделитель предложений, пропускаем ее\n if row == '\\n':\n f.write(row)\n continue\n \n # получаем имя документа, на строку которого сейчас смотрим \n now_filename = pattern.match(row).group(1)\n \n if now_filename == previous_filename:\n f.write(row)\n previous_filename = now_filename # вроде неопасно, копирование для строк не нужно\n \n else:\n f.write('#end document\\n')\n doc_num += 1\n f.write('#begin document (%d); part 0\\n' % doc_num)\n f.write(row)\n previous_filename = now_filename\n \n \n ",
"100%|██████████| 12046743/12046743 [01:01<00:00, 194344.64it/s]\n"
],
[
"col_names=['doc_name', 'zeros', 'word_num_sent', 'sent_entry', 'pos', 'star1', 'empty1', 'empty2', 'empty3', 'empty4', 'empty5', 'star2', 'coref']\nconll_df = pd.read_csv('conll_clear_12m.txt', sep=' ', engine='python', names=col_names)\n\n\n",
"_____no_output_____"
],
[
"# пока поделим выборку просто по численным размерам \ndf_train, df_test = train_test_split(conll_df, test_size=0.1, random_state=RANDOM_SEED, shuffle=False)\ndf_val, df_test = train_test_split(df_test, test_size=0.5, random_state=RANDOM_SEED, shuffle=False)",
"_____no_output_____"
],
[
"df_train.shape, df_val.shape, df_test.shape",
"_____no_output_____"
]
],
[
[
"TODO:\n+ достать пол/дату из текста сообщений\n+ узнать какая есть еще инфа про сообщения\n+ убрать вхождения не важные для медицинского анализа\n+ улучшить постеггирование, для знаков перпинания добавить собсвтенные символы, привести к универсальному формату\n+ улучишть способ поиска расположения кореференции\n+ Объединить в единое аббревиатуры\n+ правильно очистить документы и расставить кореференцию только на нужные медицинские использования\n+ улучшить качество выборки, чтобы не выделялось ничего в названиях и префиксах документов, отсеить такие вхождения(исполнено в конлл форматировании)\n+ улучшить теггирование с помощью спейси https://pypi.org/project/spacy-conll/\n+ переложить на новые берты https://github.com/google-research/bert",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79aca239098eedce24984b033de92de82c64bd7 | 1,715 | ipynb | Jupyter Notebook | notebooks/bash.ipynb | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | [
"MIT"
] | null | null | null | notebooks/bash.ipynb | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | [
"MIT"
] | null | null | null | notebooks/bash.ipynb | elcolumbio/waipawama | 6ca23c3a2f35ba07762fb68d6ce115ff8f826903 | [
"MIT"
] | null | null | null | 17.864583 | 94 | 0.522449 | [
[
[
"# DBT",
"_____no_output_____"
],
[
"1. Activate virtual environment, because airflow and dbt have both a lot of dependencies",
"_____no_output_____"
]
],
[
[
"source /home/flo/dbt-env/bin/activate",
"_____no_output_____"
]
],
[
[
"2. Compile your sql model to a runable query, its nice for debugging",
"_____no_output_____"
]
],
[
[
"dbt compile --vars 'timespan: 2018-10'",
"_____no_output_____"
]
],
[
[
"2. If you only want to run 1 modal a time",
"_____no_output_____"
]
],
[
[
"dbt run --models bankkonto_monthly --vars 'timespan: 2018-01'",
"_____no_output_____"
]
],
[
[
"# Airflow",
"_____no_output_____"
]
]
] | [
"markdown",
"raw",
"markdown",
"raw",
"markdown",
"raw",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"raw"
],
[
"markdown"
],
[
"raw"
],
[
"markdown"
]
] |
e79ad14de51743c400243dc7d8629eb9c7a76706 | 24,421 | ipynb | Jupyter Notebook | work2.ipynb | Mo5mami/kinasa_2nd_place | 10327a07969124ec24c5e9f0ec6a547e8a11abd7 | [
"MIT"
] | 4 | 2020-10-19T11:00:01.000Z | 2022-02-17T10:01:56.000Z | work2.ipynb | Mo5mami/kinasa_2nd_place | 10327a07969124ec24c5e9f0ec6a547e8a11abd7 | [
"MIT"
] | null | null | null | work2.ipynb | Mo5mami/kinasa_2nd_place | 10327a07969124ec24c5e9f0ec6a547e8a11abd7 | [
"MIT"
] | 2 | 2021-01-19T13:36:51.000Z | 2021-05-18T22:55:43.000Z | 34.251052 | 1,964 | 0.528234 | [
[
[
"import argparse\nimport sys\nimport os\nimport random\nfrom tqdm.notebook import tqdm\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.model_selection import KFold,StratifiedKFold\nimport cv2\nimport gc\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.stats.mstats import gmean\n\nfrom tensorflow.keras.layers import Input,Dense,Dropout,Embedding,Concatenate,Flatten,LSTM ,Bidirectional,GRU\nfrom tensorflow.keras.activations import relu ,sigmoid,softmax\nfrom tensorflow.keras.losses import CategoricalCrossentropy\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow_addons.optimizers import AdamW",
"_____no_output_____"
],
[
"def seed_all(seed_value):\n random.seed(seed_value) # Python\n np.random.seed(seed_value) # cpu vars\n torch.manual_seed(seed_value) # cpu vars\n tf.random.set_seed(seed_value+1000)\n #os.environ['PYTHONHASHSEED'] = str(seed_value)\n #os.environ['TF_DETERMINISTIC_OPS'] = '1'\n #os.environ['TF_KERAS'] = '1'\n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed_value)\n torch.cuda.manual_seed_all(seed_value) # gpu vars\n torch.backends.cudnn.deterministic = True #needed\n torch.backends.cudnn.benchmark = False\n\nseed_all(42)",
"_____no_output_____"
]
],
[
[
"# Config",
"_____no_output_____"
]
],
[
[
"class Config:\n n_folds=10\n random_state=42\n tbs = 1024\n vbs = 512\n data_path=\"data\"\n result_path=\"results\"\n models_path=\"models\"",
"_____no_output_____"
]
],
[
[
"# plot and util",
"_____no_output_____"
]
],
[
[
"def write_to_txt(file_name,column):\n with open(file_name, 'w') as f:\n for item in column:\n f.write(\"%s\\n\" % item)",
"_____no_output_____"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"train=pd.read_csv(os.path.join(Config.data_path,\"train.csv\"))\ntest=pd.read_csv(os.path.join(Config.data_path,\"test.csv\"))\naae=pd.read_csv(os.path.join(Config.data_path,\"amino_acid_embeddings.csv\"))\nsubmission=pd.read_csv(os.path.join(Config.data_path,\"SampleSubmission.csv\"))",
"_____no_output_____"
]
],
[
[
"# Prepare and split data",
"_____no_output_____"
]
],
[
[
"train[\"Sequence_len\"]=train[\"Sequence\"].apply(lambda x : len(x))\ntest[\"Sequence_len\"]=test[\"Sequence\"].apply(lambda x : len(x))",
"_____no_output_____"
],
[
"max_seq_length = 550 # max seq length in this data set is 550 ",
"_____no_output_____"
],
[
"#stratified k fold\ntrain[\"folds\"]=-1\nkf = StratifiedKFold(n_splits=Config.n_folds, random_state=Config.random_state, shuffle=True)\nfor fold, (_, val_index) in enumerate(kf.split(train,train[\"target\"])):\n train.loc[val_index, \"folds\"] = fold",
"_____no_output_____"
],
[
"train.head()",
"_____no_output_____"
],
[
"# reduce seq length\nif max_seq_length>550 : \n train[\"Sequence\"] = train[\"Sequence\"].apply(lambda x: \"\".join(list(x)[0:max_seq_length]))\n test[\"Sequence\"] = test[\"Sequence\"].apply(lambda x: \"\".join(list(x)[0:max_seq_length]))",
"_____no_output_____"
],
[
"voc_set = set(['P', 'V', 'I', 'K', 'N', 'B', 'F', 'Y', 'E', 'W', 'R', 'D', 'X', 'S', 'C', 'U', 'Q', 'A', 'M', 'H', 'L', 'G', 'T'])\nvoc_set_map = { k:v for k , v in zip(voc_set,range(1,len(voc_set)+1))}\nnumber_of_class = train[\"target\"].nunique()",
"_____no_output_____"
],
[
"def encode(text_tensor, label):\n encoded_text = [ voc_set_map[e] for e in list(text_tensor.numpy().decode())]\n return encoded_text, label\ndef encode_map_fn(text, label):\n # py_func doesn't set the shape of the returned tensors.\n encoded_text, label = tf.py_function(encode, \n inp=[text, label], \n Tout=(tf.int64, tf.int64))\n encoded_text.set_shape([None])\n label=tf.one_hot(label,number_of_class)\n label.set_shape([number_of_class])\n \n return encoded_text, label\ndef get_data_loader(file,batch_size,labels):\n \n label_data=tf.data.Dataset.from_tensor_slices(labels)\n data_set=tf.data.TextLineDataset(file)\n data_set=tf.data.Dataset.zip((data_set,label_data))\n\n data_set=data_set.repeat()\n data_set = data_set.shuffle(len(labels))\n data_set=data_set.map(encode_map_fn,tf.data.experimental.AUTOTUNE)\n data_set=data_set.padded_batch(batch_size)\n data_set = data_set.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return data_set\n\n\ndef get_data_loader_test(file,batch_size,labels):\n \n label_data=tf.data.Dataset.from_tensor_slices(labels.target)\n data_set=tf.data.TextLineDataset(file)\n data_set=tf.data.Dataset.zip((data_set,label_data))\n data_set=data_set.map(encode_map_fn,tf.data.experimental.AUTOTUNE)\n data_set=data_set.padded_batch(batch_size)\n data_set = data_set.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n return data_set",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"def model():\n name = \"seq\"\n dropout_rate = 0.1\n learning_rate = 0.001\n sequnce = Input([None],name=\"sequnce\")\n \n EMB_layer = Embedding(input_dim = len(voc_set)+1, output_dim = 64, name = \"emb_layer\")\n \n GRU_layer_2 = GRU(units=256, name = \"gru_2\", return_sequences = False)\n BIDIR_layer_2 = Bidirectional(GRU_layer_2, name=\"bidirectional_2\")\n \n Dens_layer_1 = Dense(units=512, activation=relu, kernel_regularizer=None, bias_regularizer=None, name=name+\"_dense_layer_1\")\n Dens_layer_2 = Dense(units=256, activation=relu, kernel_regularizer=None, bias_regularizer=None, name=name+\"_dense_layer_2\")\n \n output = Dense(units=number_of_class, activation=softmax, kernel_regularizer=None, bias_regularizer=None, name=name+\"_dense_layer_output\")\n \n dropout_1 = Dropout(dropout_rate)\n \n \n emb_layer = EMB_layer(sequnce)\n logits = output(Dens_layer_2(dropout_1(Dens_layer_1(BIDIR_layer_2(emb_layer)))))\n\n \n model = tf.keras.Model(inputs={\"sequnce\":sequnce, },outputs=logits) \n \n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n #loss= tfa.losses.SigmoidFocalCrossEntropy(reduction=tf.keras.losses.Reduction.AUTO)\n loss=CategoricalCrossentropy()\n model.compile(optimizer=optimizer, loss=loss, metrics=[tf.keras.metrics.CategoricalAccuracy(name=\"Acc\")]) \n model.summary()\n return model \n",
"_____no_output_____"
]
],
[
[
"# training ",
"_____no_output_____"
]
],
[
[
"def trainn(fold):\n model_path=f\"model_{fold}.h5\"\n df_train = train[train[\"folds\"] != fold].reset_index(drop=True)\n df_valid = train[train[\"folds\"] == fold].reset_index(drop=True)\n write_to_txt(f\"data/train_{fold}.txt\",df_train.Sequence)\n write_to_txt(f\"data/valid_{fold}.txt\",df_valid.Sequence)\n train_label=df_train[\"target\"]\n valid_label=df_valid[\"target\"]\n train_dl = get_data_loader(f\"data/train_{fold}.txt\",Config.tbs,train_label)\n valid_dl = get_data_loader(f\"data/valid_{fold}.txt\",Config.vbs,valid_label)\n checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(Config.models_path,model_path),\n save_weights_only=True,monitor = 'val_loss',\n save_best_only=True,mode=\"min\", verbose=1)\n callbacks=[checkpoint]\n my_model = model()\n \n history = my_model.fit(train_dl,\n validation_data=valid_dl,\n epochs=15,\n verbose=1,\n batch_size=Config.tbs,\n validation_batch_size=Config.vbs,\n validation_steps=len(df_valid)//Config.vbs,\n steps_per_epoch=len(df_train)/Config.tbs,\n callbacks=callbacks\n )\n ",
"_____no_output_____"
],
[
"def predict(fold):\n model_path=f\"model_{fold}.h5\"\n write_to_txt(f\"data/test_{fold}.txt\",test.Sequence)\n test[\"target\"]=0\n test_label=test[\"target\"]\n test_dl = get_data_loader_test(f\"data/test_{fold}.txt\",Config.vbs,test)\n my_model = model()\n my_model.load_weights(os.path.join(Config.models_path,model_path))\n prediction=my_model.predict(test_dl)\n return prediction",
"_____no_output_____"
],
[
"trainn(2)",
"Model: \"functional_5\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nsequnce (InputLayer) [(None, None)] 0 \n_________________________________________________________________\nemb_layer (Embedding) (None, None, 128) 3072 \n_________________________________________________________________\nbidirectional_2 (Bidirection (None, 512) 592896 \n_________________________________________________________________\nseq_dense_layer_1 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 512) 0 \n_________________________________________________________________\nseq_dense_layer_2 (Dense) (None, 256) 131328 \n_________________________________________________________________\nseq_dense_layer_output (Dens (None, 8) 2056 \n=================================================================\nTotal params: 992,008\nTrainable params: 992,008\nNon-trainable params: 0\n_________________________________________________________________\nEpoch 1/15\n176/399 [============>.................] - ETA: 3:52 - loss: 0.0079 - Acc: 0.9976"
],
[
"p=predict(2)",
"Model: \"functional_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nsequnce (InputLayer) [(None, None)] 0 \n_________________________________________________________________\nemb_layer (Embedding) (None, None, 128) 3072 \n_________________________________________________________________\nbidirectional_2 (Bidirection (None, 512) 592896 \n_________________________________________________________________\nseq_dense_layer_1 (Dense) (None, 512) 262656 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 512) 0 \n_________________________________________________________________\nseq_dense_layer_2 (Dense) (None, 256) 131328 \n_________________________________________________________________\nseq_dense_layer_output (Dens (None, 8) 2056 \n=================================================================\nTotal params: 992,008\nTrainable params: 992,008\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"sub=test[[\"ID\"]].copy()\nfor i in range(number_of_class):\n sub[\"target_{}\".format(i)]=p[:,i]",
"_____no_output_____"
],
[
"sub.head()",
"_____no_output_____"
],
[
"sub.to_csv(os.path.join(Config.result_path,\"sub_p2_epoch15.csv\"),index=False)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79ad54a1d6156d4dc1a50b60beba1d94efb37ed | 525,094 | ipynb | Jupyter Notebook | poeun/14_1.ipynb | ingu627/handson-ml2 | fa16881b044750a895ba6eddf98c1d9bac198917 | [
"Apache-2.0"
] | 1 | 2021-11-13T02:04:22.000Z | 2021-11-13T02:04:22.000Z | poeun/14_1.ipynb | ingu627/handson-ml2 | fa16881b044750a895ba6eddf98c1d9bac198917 | [
"Apache-2.0"
] | null | null | null | poeun/14_1.ipynb | ingu627/handson-ml2 | fa16881b044750a895ba6eddf98c1d9bac198917 | [
"Apache-2.0"
] | null | null | null | 2,949.966292 | 193,486 | 0.963376 | [
[
[
"from sklearn.datasets import load_sample_image\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nchina = load_sample_image('china.jpg') / 255\nflower = load_sample_image(\"flower.jpg\") / 255\nimages = np.array([china, flower])\nbatch_size, height, width, channels = images.shape\n\nfilters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)\nfilters[:, 3, :, 0] = 1\nfilters[3, :, :, 1] = 1\n\noutputs = tf.nn.conv2d(images, filters, strides=1, padding='SAME')\n\nplt.imshow(outputs[1, :, :, 1], cmap='gray')\nplt.show()",
"_____no_output_____"
],
[
"plt.imshow(china)",
"_____no_output_____"
],
[
"plt.imshow(flower)",
"_____no_output_____"
],
[
"def plot_image(image):\n plt.imshow(image, cmap=\"gray\", interpolation=\"nearest\")\n plt.axis(\"off\")\n\ndef plot_color_image(image):\n plt.imshow(image, interpolation=\"nearest\")\n plt.axis(\"off\")",
"_____no_output_____"
],
[
"for image_index in (0, 1):\n for feature_map_index in (0, 1):\n plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)\n plot_image(outputs[image_index, :, :, feature_map_index])\n\nplt.show()",
"_____no_output_____"
],
[
"tf.keras.layers.MaxPool2D(\n pool_size=(2, 2), strides=None, padding='valid', data_format=None\n)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79ad6015214b792e52c3f9026d485558ee40b3f | 171,554 | ipynb | Jupyter Notebook | Model backlog/Train/75-jigsaw-fold1-xlm-roberta-large-cls-tail.ipynb | dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification | 44422e6aeeff227e22dbb5c05101322e9d4aabbe | [
"MIT"
] | 4 | 2020-06-23T02:31:07.000Z | 2020-07-04T11:50:08.000Z | Model backlog/Train/75-jigsaw-fold1-xlm-roberta-large-cls-tail.ipynb | dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification | 44422e6aeeff227e22dbb5c05101322e9d4aabbe | [
"MIT"
] | null | null | null | Model backlog/Train/75-jigsaw-fold1-xlm-roberta-large-cls-tail.ipynb | dimitreOliveira/Jigsaw-Multilingual-Toxic-Comment-Classification | 44422e6aeeff227e22dbb5c05101322e9d4aabbe | [
"MIT"
] | null | null | null | 132.576507 | 61,164 | 0.832385 | [
[
[
"## Dependencies",
"_____no_output_____"
]
],
[
[
"import json, warnings, shutil, glob\nfrom jigsaw_utility_scripts import *\nfrom scripts_step_lr_schedulers import *\nfrom transformers import TFXLMRobertaModel, XLMRobertaConfig\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import optimizers, metrics, losses, layers\n\nSEED = 0\nseed_everything(SEED)\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
]
],
[
[
"## TPU configuration",
"_____no_output_____"
]
],
[
[
"strategy, tpu = set_up_strategy()\nprint(\"REPLICAS: \", strategy.num_replicas_in_sync)\nAUTO = tf.data.experimental.AUTOTUNE",
"Running on TPU grpc://10.0.0.2:8470\nREPLICAS: 8\n"
]
],
[
[
"# Load data",
"_____no_output_____"
]
],
[
[
"database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-2-upper/'\nk_fold = pd.read_csv(database_base_path + '5-fold.csv')\nvalid_df = pd.read_csv(\"/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv\", \n usecols=['comment_text', 'toxic', 'lang'])\n\nprint('Train samples: %d' % len(k_fold))\ndisplay(k_fold.head())\nprint('Validation samples: %d' % len(valid_df))\ndisplay(valid_df.head())\n\nbase_data_path = 'fold_1/'\n# Unzip files\n!tar -xvf /kaggle/input/jigsaw-data-split-roberta-192-ratio-2-upper/fold_1.tar.gz",
"Train samples: 400830\n"
]
],
[
[
"# Model parameters",
"_____no_output_____"
]
],
[
[
"base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'\n\nconfig = {\n \"MAX_LEN\": 192,\n \"BATCH_SIZE\": 128,\n \"EPOCHS\": 4,\n \"LEARNING_RATE\": 1e-5, \n \"ES_PATIENCE\": None,\n \"base_model_path\": base_path + 'tf-xlm-roberta-large-tf_model.h5',\n \"config_path\": base_path + 'xlm-roberta-large-config.json'\n}\n\nwith open('config.json', 'w') as json_file:\n json.dump(json.loads(json.dumps(config)), json_file)",
"_____no_output_____"
]
],
[
[
"## Learning rate schedule",
"_____no_output_____"
]
],
[
[
"lr_min = 1e-7\nlr_start = 1e-7\nlr_max = config['LEARNING_RATE']\nstep_size = len(k_fold[k_fold['fold_1'] == 'train']) // config['BATCH_SIZE']\ntotal_steps = config['EPOCHS'] * step_size\nhold_max_steps = 0\nwarmup_steps = step_size * 1\ndecay = .9997\n\nrng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]\ny = [exponential_schedule_with_warmup(tf.cast(x, tf.float32), warmup_steps, hold_max_steps, \n lr_start, lr_max, lr_min, decay) for x in rng]\n\nsns.set(style=\"whitegrid\")\nfig, ax = plt.subplots(figsize=(20, 6))\nplt.plot(rng, y)\nprint(\"Learning rate schedule: {:.3g} to {:.3g} to {:.3g}\".format(y[0], max(y), y[-1]))",
"Learning rate schedule: 1e-07 to 9.84e-06 to 1.06e-06\n"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)\n\ndef model_fn(MAX_LEN):\n input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')\n attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')\n \n base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)\n last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})\n cls_token = last_hidden_state[:, 0, :]\n \n output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)\n \n model = Model(inputs=[input_ids, attention_mask], outputs=output)\n \n return model",
"_____no_output_____"
]
],
[
[
"# Train",
"_____no_output_____"
]
],
[
[
"# Load data\nx_train = np.load(base_data_path + 'x_train.npy')\ny_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32)\nx_valid_ml = np.load(database_base_path + 'x_valid.npy')\ny_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32)\n\n#################### ADD TAIL ####################\nx_train = np.hstack([x_train, np.load(base_data_path + 'x_train_tail.npy')])\ny_train = np.vstack([y_train, y_train])\n\nstep_size = x_train.shape[1] // config['BATCH_SIZE']\nvalid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']\n\n# Build TF datasets\ntrain_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))\nvalid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))\ntrain_data_iter = iter(train_dist_ds)\nvalid_data_iter = iter(valid_dist_ds)\n\n# Step functions\[email protected]\ndef train_step(data_iter):\n def train_step_fn(x, y):\n with tf.GradientTape() as tape:\n probabilities = model(x, training=True)\n loss = loss_fn(y, probabilities)\n grads = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n train_auc.update_state(y, probabilities)\n train_loss.update_state(loss)\n for _ in tf.range(step_size):\n strategy.experimental_run_v2(train_step_fn, next(data_iter))\n\[email protected]\ndef valid_step(data_iter):\n def valid_step_fn(x, y):\n probabilities = model(x, training=False)\n loss = loss_fn(y, probabilities)\n valid_auc.update_state(y, probabilities)\n valid_loss.update_state(loss)\n for _ in tf.range(valid_step_size):\n strategy.experimental_run_v2(valid_step_fn, next(data_iter))\n\n# Train model\nwith strategy.scope():\n model = model_fn(config['MAX_LEN'])\n optimizer = optimizers.Adam(learning_rate=lambda: \n 
exponential_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32), \n warmup_steps, hold_max_steps, lr_start, \n lr_max, lr_min, decay))\n loss_fn = losses.binary_crossentropy\n train_auc = metrics.AUC()\n valid_auc = metrics.AUC()\n train_loss = metrics.Sum()\n valid_loss = metrics.Sum()\n\nmetrics_dict = {'loss': train_loss, 'auc': train_auc, \n 'val_loss': valid_loss, 'val_auc': valid_auc}\n\nhistory = custom_fit(model, metrics_dict, train_step, valid_step, train_data_iter, valid_data_iter, \n step_size, valid_step_size, config['BATCH_SIZE'], config['EPOCHS'], \n config['ES_PATIENCE'], save_last=False)\n# model.save_weights('model.h5')\n\n# Make predictions\n# x_train = np.load(base_data_path + 'x_train.npy')\n# x_valid = np.load(base_data_path + 'x_valid.npy')\nx_valid_ml_eval = np.load(database_base_path + 'x_valid.npy')\n\n# train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))\n# valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))\nvalid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))\n\n# k_fold.loc[k_fold['fold_1'] == 'train', 'pred_1'] = np.round(train_preds)\n# k_fold.loc[k_fold['fold_1'] == 'validation', 'pred_1'] = np.round(valid_preds)\nvalid_df['pred_1'] = valid_ml_preds\n\n\n# Fine-tune on validation set\n#################### ADD TAIL ####################\nx_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')])\ny_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml])\n\nvalid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE']\n\n# Build TF datasets\ntrain_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail, \n config['BATCH_SIZE'], AUTO, seed=SEED))\ntrain_ml_data_iter = iter(train_ml_dist_ds)\n\nhistory_ml = custom_fit(model, metrics_dict, train_step, valid_step, train_ml_data_iter, valid_data_iter, \n valid_step_size_tail, 
valid_step_size, config['BATCH_SIZE'], 1, \n config['ES_PATIENCE'], save_last=False)\nmodel.save_weights('model_ml.h5')\n\n# Make predictions\nvalid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))\nvalid_df['pred_ml_1'] = valid_ml_preds\n\n### Delete data dir\nshutil.rmtree(base_data_path)",
"Train for 5010 steps, validate for 62 steps\n\nEPOCH 1/4\ntime: 1715.0s loss: 0.2442 auc: 0.9590 val_loss: 0.2856 val_auc: 0.9211\n\nEPOCH 2/4\ntime: 1520.0s loss: 0.1623 auc: 0.9816 val_loss: 0.2865 val_auc: 0.9164\n\nEPOCH 3/4\ntime: 1519.8s loss: 0.1449 auc: 0.9852 val_loss: 0.3106 val_auc: 0.9086\n\nEPOCH 4/4\ntime: 1520.0s loss: 0.1406 auc: 0.9860 val_loss: 0.2875 val_auc: 0.9180\nTraining finished\nTrain for 125 steps, validate for 62 steps\n\nEPOCH 1/1\ntime: 1623.4s loss: 7.3732 auc: 0.9554 val_loss: 0.1360 val_auc: 0.9772\nTraining finished\n"
]
],
[
[
"## Model loss graph",
"_____no_output_____"
]
],
[
[
"plot_metrics(history)\n# ML fine-tunned preds\nplot_metrics(history_ml)",
"_____no_output_____"
]
],
[
[
"# Model evaluation",
"_____no_output_____"
]
],
[
[
"# display(evaluate_model(k_fold, 1, label_col='toxic_int').style.applymap(color_map))",
"_____no_output_____"
]
],
[
[
"# Confusion matrix",
"_____no_output_____"
]
],
[
[
"# train_set = k_fold[k_fold['fold_1'] == 'train']\n# validation_set = k_fold[k_fold['fold_1'] == 'validation'] \n# plot_confusion_matrix(train_set['toxic_int'], train_set['pred_1'], \n# validation_set['toxic_int'], validation_set['pred_1'])",
"_____no_output_____"
]
],
[
[
"# Model evaluation by language",
"_____no_output_____"
]
],
[
[
"display(evaluate_model_lang(valid_df, 1).style.applymap(color_map))\n# ML fine-tunned preds\ndisplay(evaluate_model_lang(valid_df, 1, pred_col='pred_ml').style.applymap(color_map))",
"_____no_output_____"
]
],
[
[
"# Visualize predictions",
"_____no_output_____"
]
],
[
[
"pd.set_option('max_colwidth', 120)\nprint('English validation set')\ndisplay(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))\n\nprint('Multilingual validation set')\ndisplay(valid_df[['comment_text', 'toxic'] + [c for c in valid_df.columns if c.startswith('pred')]].head(10))",
"English validation set\n"
]
],
[
[
"# Test set predictions",
"_____no_output_____"
]
],
[
[
"x_test = np.load(database_base_path + 'x_test.npy')\ntest_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO))",
"_____no_output_____"
],
[
"submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')\nsubmission['toxic'] = test_preds\nsubmission.to_csv('submission.csv', index=False)\n\ndisplay(submission.describe())\ndisplay(submission.head(10))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79ae4ce87f830eaa5c6e061cec88eaf5bc22c99 | 391,416 | ipynb | Jupyter Notebook | GPy/sparse_gp_regression.ipynb | olumighty1/notebook | ddb2a70491221ceb92d34cc3b7dc8b94f382192c | [
"BSD-3-Clause"
] | 143 | 2015-01-24T10:22:21.000Z | 2022-03-23T05:59:36.000Z | GPy/sparse_gp_regression.ipynb | lawrennd/notebook | de7d53f3e275a1882ec737bc16cf86bc02f64caa | [
"BSD-3-Clause"
] | 16 | 2015-09-10T17:49:22.000Z | 2022-03-24T16:27:12.000Z | GPy/sparse_gp_regression.ipynb | lawrennd/notebook | de7d53f3e275a1882ec737bc16cf86bc02f64caa | [
"BSD-3-Clause"
] | 105 | 2015-01-20T14:10:39.000Z | 2022-02-28T13:43:06.000Z | 42.087742 | 409 | 0.481861 | [
[
[
"# Sparse GP Regression\n\n### 14th January 2014 James Hensman\n#### 29th September 2014 Neil Lawrence (added sub-titles, notes and some references).",
"_____no_output_____"
],
[
"This example shows the variational compression effect of so-called 'sparse' Gaussian processes. In particular we show how using the variational free energy framework of [Titsias, 2009](http://jmlr.csail.mit.edu/proceedings/papers/v5/titsias09a/titsias09a.pdf) we can compress a Gaussian process fit. First we set up the notebook with a fixed random seed, and import GPy.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'svg'\nimport GPy\nimport numpy as np\nnp.random.seed(101)",
"_____no_output_____"
]
],
[
[
"## Sample Function\n\nNow we'll sample a Gaussian process regression problem directly from a Gaussian process prior. We'll use an exponentiated quadratic covariance function with a lengthscale and variance of 1 and sample 50 equally spaced points. ",
"_____no_output_____"
]
],
[
[
"N = 50\nnoise_var = 0.05\n\nX = np.linspace(0,10,50)[:,None]\nk = GPy.kern.RBF(1)\ny = np.random.multivariate_normal(np.zeros(N),k.K(X)+np.eye(N)*np.sqrt(noise_var)).reshape(-1,1)",
"_____no_output_____"
]
],
[
[
"## Full Gaussian Process Fit\n\nNow we use GPy to optimize the parameters of a Gaussian process given the sampled data. Here, there are no approximations, we simply fit the full Gaussian process.",
"_____no_output_____"
]
],
[
[
"m_full = GPy.models.GPRegression(X,y)\nm_full.optimize('bfgs')\nm_full.plot()\nprint m_full",
"\nName : GP regression\nObjective : 50.0860723468\nNumber of Parameters : 3\nNumber of Optimization Parameters : 3\nUpdates : True\nParameters:\n \u001b[1mGP_regression. \u001b[0;0m | value | constraints | priors\n \u001b[1mrbf.variance \u001b[0;0m | 1.65824860473 | +ve | \n \u001b[1mrbf.lengthscale \u001b[0;0m | 1.11215383162 | +ve | \n \u001b[1mGaussian_noise.variance\u001b[0;0m | 0.236134236859 | +ve | \n"
]
],
[
[
"## A Poor `Sparse' GP Fit\n\nNow we construct a sparse Gaussian process. This model uses the inducing variable approximation and initialises the inducing variables in two 'clumps'. Our initial fit uses the *correct* covariance function parameters, but a badly placed set of inducing points. ",
"_____no_output_____"
]
],
[
[
"Z = np.hstack((np.linspace(2.5,4.,3),np.linspace(7,8.5,3)))[:,None]\nm = GPy.models.SparseGPRegression(X,y,Z=Z)\nm.likelihood.variance = noise_var\nm.plot()\nprint m",
"\nName : sparse gp\nObjective : 260.809828016\nNumber of Parameters : 9\nNumber of Optimization Parameters : 9\nUpdates : True\nParameters:\n \u001b[1msparse_gp. \u001b[0;0m | value | constraints | priors\n \u001b[1minducing inputs \u001b[0;0m | (6, 1) | | \n \u001b[1mrbf.variance \u001b[0;0m | 1.0 | +ve | \n \u001b[1mrbf.lengthscale \u001b[0;0m | 1.0 | +ve | \n \u001b[1mGaussian_noise.variance\u001b[0;0m | 0.05 | +ve | \n"
]
],
[
[
"\nNotice how the fit is reasonable where there are inducing points, but bad elsewhere. \n\n### Optimizing Covariance Parameters\n\nNext, we will try and find the optimal covariance function parameters, given that the inducing inputs are held in their current location. ",
"_____no_output_____"
]
],
[
[
"m.inducing_inputs.fix()\nm.optimize('bfgs')\nm.plot()\nprint m",
"\nName : sparse gp\nObjective : 53.9735537142\nNumber of Parameters : 9\nNumber of Optimization Parameters : 3\nUpdates : True\nParameters:\n \u001b[1msparse_gp. \u001b[0;0m | value | constraints | priors\n \u001b[1minducing inputs \u001b[0;0m | (6, 1) | fixed | \n \u001b[1mrbf.variance \u001b[0;0m | 1.73905117564 | +ve | \n \u001b[1mrbf.lengthscale \u001b[0;0m | 3.02312650701 | +ve | \n \u001b[1mGaussian_noise.variance\u001b[0;0m | 0.372990010041 | +ve | \n"
]
],
[
[
"The poor location of the inducing inputs causes the model to 'underfit' the data. The lengthscale is much longer than the full GP, and the noise variance is larger. This is because in this case the Kullback Leibler term in the objective free energy is dominating, and requires a larger lengthscale to improve the quality of the approximation. This is due to the poor location of the inducing inputs. \n\n### Optimizing Inducing Inputs\n\nFirstly we try optimzing the location of the inducing inputs to fix the problem, however we still get a larger lengthscale than the Gaussian process we sampled from (or the full GP fit we did at the beginning).",
"_____no_output_____"
]
],
[
[
"m.randomize()\nm.Z.unconstrain()\nm.optimize('bfgs')\nm.plot()",
"_____no_output_____"
]
],
[
[
"The inducing points spread out to cover the data space, but the fit isn't quite there. We can try increasing the number of the inducing points.\n\n### Train with More Inducing Points\n\nNow we try 12 inducing points, rather than the original six. We then compare with the full Gaussian process likelihood.",
"_____no_output_____"
]
],
[
[
"Z = np.random.rand(12,1)*12\nm = GPy.models.SparseGPRegression(X,y,Z=Z)\n\nm.optimize('bfgs')\nm.plot()\nm_full.plot()\nprint m.log_likelihood(), m_full.log_likelihood()",
"[[-50.09844715]] -50.0860723468\n"
]
],
[
[
"This time, we have enough inducing points and the fit resembles that of the GP. This is verified by the fact that the bound on the marginal likelihood is tight, which means that our variational approximation must be good (the difference between the bound and the true likelihood is the Kullback Leibler divergence between the approximation and the truth). ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e79b00e410e24442b5464faebda72051c4989562 | 140,649 | ipynb | Jupyter Notebook | analysis/mf_grc_analysis/grc_locs/grc_locations_all_210514.ipynb | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null | analysis/mf_grc_analysis/grc_locs/grc_locations_all_210514.ipynb | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null | analysis/mf_grc_analysis/grc_locs/grc_locations_all_210514.ipynb | htem/cb2_project_analysis | a677cbadc7e3bf0074975a94ed1d06b4801899c0 | [
"MIT"
] | null | null | null | 382.19837 | 59,880 | 0.936068 | [
[
[
"\nimport sys\nimport importlib\nsys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')\n\nimport my_plot\nimportlib.reload(my_plot)\nfrom my_plot import MyPlotData, my_box_plot\n\ndef to_ng_coord(coord):\n return (\n int(coord[0]/4),\n int(coord[1]/4),\n int(coord[2]/40),\n )\n\nscript_n = 'grc_locations_all_210514'\n\n# def get_eucledean_dist(a, b):\n# return np.linalg.norm(\n# (a[0]-b[0], a[1]-b[1], a[2]-b[2]))\n\n# def get_distance(u, v):\n# return get_eucledean_dist(u, v)\n\nimport compress_pickle\n# input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114_restricted_z.gz')\n# input_graph = compress_pickle.load('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/mf_grc_model/input_graph_201114.gz')\n\n\nfname = ('/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc/'\\\n 'mf_grc_model/input_graph_210407_all.gz')\ninput_graph = compress_pickle.load(fname)\n\n\ngrcs = [k for k in input_graph.grcs.keys()]\n\n# z_min = 15\n# z_max = 35\nz_min = 19800-2500\nz_max = 29800+2500\n# z_min = 16000\n# z_max = 20000\n# z_min = 0\n# z_max = 50\nx_min = 280\nx_max = 600\nmpd = MyPlotData()\n# claw_lengths = defaultdict(int)\nnum_grcs = 0\ncounted_grcs = []\nfor grc_id in input_graph.grcs:\n grc = input_graph.grcs[grc_id]\n soma_loc = grc.soma_loc\n x, y, z = soma_loc\n if x < x_min*1000 or x > x_max*1000:\n continue\n if z < z_min or z > z_max:\n continue\n mpd.add_data_point(\n# x=x/1000-x_min,\n x=x/1000/4,\n y=500-y/1000,\n z=z/40,\n claw_count=max(len(grc.edges), 2),\n )\n num_grcs += 1\n counted_grcs.append(grc_id)\n\nprint(f'Counted {num_grcs} grcs within bounds')\n\n",
"Counted 762 grcs within bounds\n"
],
[
"'grc_2854' in grcs",
"_____no_output_____"
],
[
"\ndef custom_legend_fn(plt):\n# plt.legend(bbox_to_anchor=(1.025, .8), loc='upper left', borderaxespad=0.)\n plt.legend(bbox_to_anchor=(1, .925), loc='upper left', frameon=False)\n\nsave_filename=f'{script_n}_xy.svg'\nimport seaborn as sns\nimportlib.reload(my_plot); my_plot.my_relplot(\n mpd,\n context='paper',\n font_scale=.65,\n kind='scatter',\n x=\"x\",\n y=\"y\",\n s=10,\n linewidth=0,\n alpha=.9,\n aspect=2.9,\n width=3.5,\n xlim=(None, x_max-x_min+10),\n hue=\"claw_count\",\n palette=sns.color_palette(\"mako_r\", as_cmap=True),\n save_filename=save_filename,\n y_axis_label='Y (μm)',\n# title='Granule Cell Cell Body Locations',\n# x_axis_label='Dorsal-ventral Axis: X (μm)',\n custom_legend_fn=custom_legend_fn,\n show=True,\n )\n",
"Height: 1.206896551724138, Aspect: 2.9\n"
],
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\nf, ax = plt.subplots(figsize=(12, 6))\nimportlib.reload(my_plot); my_plot.my_scatterplot(\n mpd,\n x=\"x\",\n y=\"y\",\n ax=ax,\n )",
"_____no_output_____"
],
[
"\nimportlib.reload(my_plot); my_plot.my_displot(\n mpd,\n x=\"x\",\n )",
"Height: 6, Aspect: 1.33\n"
],
[
"\nimportlib.reload(my_plot); my_plot.my_displot(\n mpd,\n x=\"x\",\n y='z',\n ylim=(0, None),\n binwidth=(10, 50),\n cbar=True,\n )",
"Height: 6, Aspect: 1.33\n"
],
[
"\nimportlib.reload(my_plot); my_plot.my_displot(\n mpd,\n x=\"x\",\n y='y',\n ylim=(0, None),\n binwidth=(10, 10),\n cbar=True,\n )",
"Height: 6, Aspect: 1.33\n"
],
[
"\nsave_filename=f'{script_n}_xz.svg'\nimport seaborn as sns\nimportlib.reload(my_plot); my_plot.my_relplot(\n mpd,\n# context='paper',\n# font_scale=1,\n kind='scatter',\n x=\"x\",\n y=\"z\",\n aspect=2.9,\n width=10,\n xlim=(None, x_max-x_min+10),\n# size=\"claw_count\",\n hue=\"claw_count\",\n palette=sns.color_palette(\"mako_r\", as_cmap=True),\n # alpha=.9,\n y_axis_label='Z (μm)',\n x_axis_label='X (μm)',\n save_filename=save_filename,\n custom_legend_fn=custom_legend_fn,\n show=True,\n )\n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79b02b26d792c31a817df75e278430f2121a983 | 53,245 | ipynb | Jupyter Notebook | CT/314_statistical_communication_theory/mv_joshi/2019/lab_sessions/lab8.ipynb | u-square2/AcadVault | 571612c928a1357c849cf9a06b47bab15dc5afa3 | [
"MIT"
] | 3 | 2021-11-08T03:54:46.000Z | 2021-11-08T04:31:52.000Z | CT/314_statistical_communication_theory/mv_joshi/2019/lab_sessions/lab8.ipynb | u-square2/AcadVault | 571612c928a1357c849cf9a06b47bab15dc5afa3 | [
"MIT"
] | 1 | 2020-02-08T20:16:17.000Z | 2020-02-08T20:24:31.000Z | CT/314_statistical_communication_theory/mv_joshi/2019/lab_sessions/lab8.ipynb | u-square2/AcadVault | 571612c928a1357c849cf9a06b47bab15dc5afa3 | [
"MIT"
] | 2 | 2022-01-28T05:28:33.000Z | 2022-01-28T05:28:54.000Z | 166.390625 | 15,284 | 0.873265 | [
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import misc\nfrom scipy.io import wavfile\nfrom numpy import linalg as la\nimport matplotlib.image as mpimg\nimport glob\nfrom numpy import linalg as LA\nimport scipy.io as sio",
"_____no_output_____"
]
],
[
[
"## <font color='red'> 1. Experimentally prove weak law of large numbers. $$\\lim_{n\\to \\infty} P[\\vert M_n-m_x\\vert>\\epsilon]=0$$ Where, $M_n$ is the sample mean, $m_x$ is the actual mean, $\\epsilon$ is a small positive number and $n$ is the number of sample points.<font>",
"_____no_output_____"
]
],
[
[
"#Law of weak numbers\n#write code here\n#sample = np.random.normal(0, 1, 1000)\n#p= np.zeros(sample.shape)\n#for i in range(0,9999):\n#p[i]=np.sum(sample[0:i])/(i+1)\ncount=0\nfor i in range (1,1000):\n sample = np.random.normal(0, 1, 1000) #n=1000\n meanv= np.mean(sample)\n if meanv<0.1: #epsilon\n count+=1\ncount=float(count/1000)\nprint(count)\n ",
"0.997\n"
]
],
[
[
"## <font color='red'> 2. Experimentally prove strong law of large numbers. $$P[\\lim_{n\\to \\infty} M_n=m_x]=1,$$ Where, $M_n$ is the sample mean, $m_x$ is the actual mean, and $n$ is the number of sample points.<font>",
"_____no_output_____"
]
],
[
[
"#Law of strong numbers\n#write code here\nitr=np.arange(10000)\nactual_mean=np.zeros(10000)\nsample = np.random.normal(0, 1, 10000)\nMn= np.zeros(sample.shape)\nfor i in range(0,9999):\n Mn[i]=np.sum(sample[0:i])/(i+1)\nplt.figure()\nplt.plot(itr,actual_mean,label='actual_mean')\nplt.plot(itr,Mn,label='Mn')\nplt.legend()\nplt.figure()\nplt.plot(itr,Mn-actual_mean,label='error')\nplt.figure()\n",
"_____no_output_____"
]
],
[
[
"## <font color='red'> 3. Experimentally prove central limit theorem. $$\\frac{S_N-E[S_N]}{\\sqrt{var(S_N)}}=\\frac{\\sum_{i=1}^{N}{X_i}-\\sum_{i=1}^{N}{E[X_i]}}{\\sqrt{\\sum_{i=1}^{N}{var[X_i]}}}\\to N(0,1)$$,Where, $X_1, X_2, . . , X_N$ are random variables with mean $E[X_i]$ and variance $var[X_i]$, $i=1,2,…, N$. <font>",
"_____no_output_____"
]
],
[
[
"N=100 \nM=10000\nx = np.zeros([M,N])\n#write code here\nx=np.random.uniform(0,10,[M,N])\nplt.hist(x[:,0],normed=True)\nplt.figure()\nSn=np.sum(x,0)\nSn_mean=np.mean(Sn)\nsig=np.var(Sn)\nlim=(Sn-Sn_mean)/np.sqrt(sig)\n\n#write code here\nplt.figure()\nplt.hist(lim,normed=True)\n\nx=np.linspace(-4,4,1000)\nfx=(1/(np.sqrt(2*np.pi)))*np.exp((-x**2)/2);\nplt.plot(x,fx)",
"/usr/local/lib/python3.6/dist-packages/matplotlib/axes/_axes.py:6521: MatplotlibDeprecationWarning: \nThe 'normed' kwarg was deprecated in Matplotlib 2.1 and will be removed in 3.1. Use 'density' instead.\n alternative=\"'density'\", removal=\"3.1\")\n"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79b05015bf5e07699eca76741f823a6e6a3bf1b | 16,254 | ipynb | Jupyter Notebook | dataquest/notebooks/lesson_linear_nonlinear_functions/Lesson - Linear and Nonlinear Functions.ipynb | monocongo/datascience_portfolio | 816028ac2dfe79a99af6f045e7d1eb18cbb017ab | [
"Unlicense"
] | null | null | null | dataquest/notebooks/lesson_linear_nonlinear_functions/Lesson - Linear and Nonlinear Functions.ipynb | monocongo/datascience_portfolio | 816028ac2dfe79a99af6f045e7d1eb18cbb017ab | [
"Unlicense"
] | null | null | null | dataquest/notebooks/lesson_linear_nonlinear_functions/Lesson - Linear and Nonlinear Functions.ipynb | monocongo/datascience_portfolio | 816028ac2dfe79a99af6f045e7d1eb18cbb017ab | [
"Unlicense"
] | null | null | null | 159.352941 | 14,264 | 0.905193 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"#### Generate a NumPy array containing 301 values from 0 to 3 and assign to x, then Transform x by applying the function: `y = -(x^2) + 3x - 1` and assign the resulting array of transformed values to y.",
"_____no_output_____"
]
],
[
[
"def transform(x):\n return -(x**2) + (3 * x) - 1\n\nx = np.linspace(0, 3, 301)\nf = lambda x : -(x**2) + (3 * x) - 1\ny = f(x)",
"_____no_output_____"
]
],
[
[
"#### Plot the X vs. Y using a simple matplotlib line plot:",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nplt.plot(x, y)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79b1e55be1d6e323a560a715baf924bf0bdcc25 | 10,862 | ipynb | Jupyter Notebook | CS42_DS_&_A_1_M2_Hash_Tables_I.ipynb | juancaruizc/CS42-DS-A-1-M2-Hash-Tables-I | bbd5c06abc74ebc4d210c7f20a4abe778f8ed3ee | [
"MIT"
] | null | null | null | CS42_DS_&_A_1_M2_Hash_Tables_I.ipynb | juancaruizc/CS42-DS-A-1-M2-Hash-Tables-I | bbd5c06abc74ebc4d210c7f20a4abe778f8ed3ee | [
"MIT"
] | null | null | null | CS42_DS_&_A_1_M2_Hash_Tables_I.ipynb | juancaruizc/CS42-DS-A-1-M2-Hash-Tables-I | bbd5c06abc74ebc4d210c7f20a4abe778f8ed3ee | [
"MIT"
] | null | null | null | 29.198925 | 404 | 0.393022 | [
[
[
"<a href=\"https://colab.research.google.com/github/juancaruizc/CS42-DS-A-1-M2-Hash-Tables-I/blob/main/CS42_DS_%26_A_1_M2_Hash_Tables_I.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Hash Tables\n\n**Attendance code: 4482**\n\nArrays have O(1) data retrieval _if you have the index_.\n\nIf you have to search for the data/index, arrays are O(n). That's a bummer.\n\nWhat if we had a magic function that would tell you the index for a given \"key\"?\n\nStore data as _key/value pairs_.\n\nWith `dict`s:\n\n```\nd = {}\n\nd[\"key\"] = value\n```\n\n## Operations on Hash Tables\n\n* GET - retrieve a value from the table\n* PUT - store a value in the table\n* DELETE - delete a value from the table\n\nShould all be O(1) over the number of keys/values.\n\n## Structure\n\nWe'll have an array to hold the data.\n\nValues will be at specific indexes in the array.\n\nWe'll have something called a _hashing function_ that takes a key and turns it into an index. This tells us where to look in the array for the value.\n\nThis function is _deterministic_, meaning that the same key will always produce the same index.\n\n## Operations Part II\n\n```\nGET(key):\n index = hashing_function(key)\n return table[index]\n```\n\n```\nPUT(key, value):\n index = hashing_function(key)\n table[index] = value\n```\n\n## Hashing Function\n\nNeed some way to map from a string to a number. Preferable a unique-randomish number.\n\n\n",
"_____no_output_____"
]
],
[
[
"d = {}\n\nd[\"goatcount\"] = 9\n\nd[\"key\"] = 'value'\n\nprint(d)\n\nprint(d[\"key\"]) # should print 'value', should also take O(1) time over the number of keys",
"{'goatcount': 9, 'key': 'value'}\nvalue\n"
],
[
"class HashTable:\n\n def __init__(self):\n self.table = [None] * 8 # Build an array of 8 elements to hold values\n\n def hashing_function(self, key):\n \"\"\"\n Naive hashing function\n\n use a real one like DJB2 or FNV1\n \"\"\"\n bignum = \"\"\n\n # O(n) over the length of the key\n # O(1) over the number of values in the table\n for c in key:\n bignum += str(ord(c))\n \n bignum = int(bignum)\n \n return bignum % len(self.table)\n\n def put(self, key, value):\n index = self.hashing_function(key)\n print(index)\n self.table[index] = value\n\n def get(self, key):\n index = self.hashing_function(key)\n return self.table[index]\n\nht = HashTable()\n\n#print(ht.hashing_function(\"goatcount\"))\n#print(ht.hashing_function(\"hello, world\"))\n\nht.put(\"goatcount\", 9)\nht.put(\"hello!\", \"foo\")\n#ht.put(\"test\", \"bar\") # Causes a collision with \"goatcount\"\n\nprint(ht.table)\n\nprint(f\"Value for goatcount: {ht.get('goatcount')}\") # Print \"9\"\nprint(f\"Value for hello!: {ht.get('hello!')}\") # Print \"9\"",
"_____no_output_____"
]
],
[
[
"# Applications of Hash Tables\n\nGoing to use `dict` for these.\n\n```\nd = {}\n\n# PUT\nd[\"key\"] = value\n\n# GET\nprint(d[\"key\"])\n```\n\n## Counting Items\n\n",
"_____no_output_____"
]
],
[
[
"#%%timeit\n\na = [1,6,7,9,5,3,3,5,7,8,8,6,5,4,3,4,6,7,8,8,5,4,6,7,8,9,7] * 70\n\ndef counter1(): # O(n^2)\n for e in a:\n count = 0\n \n for e2 in a:\n if e == e2:\n count += 1\n \n #print(e,count)\n\ndef counter2(): # O(n)\n count = {}\n\n for e in a:\n if e not in count: # Finding key `in` dictionary is O(1)\n count[e] = 0\n\n count[e] += 1\n\n print(count)\n\ncounter2()",
"{1: 70, 6: 280, 7: 350, 9: 140, 5: 280, 3: 210, 8: 350, 4: 210}\n"
],
[
"a = [1,6,7,9,5,3,3,5,7,8,8,6,5,4,3,4,6,7,8,8,5,4,6,7,8,9,7] * 70\n\ndef counter2(): # O(n)\n count = {}\n\n for e in a:\n if e not in count: # Finding key `in` dictionary is O(1)\n count[e] = 0\n\n count[e] += \n1\n # If you want to sort, first use dict.items()\n print(count)\n\n # sort by key\n sorted_count = sorted(count.items())\n\n print(list(count.items()))\n\n for k, v in sorted_count:\n print(f\"{k}: {v}\")\n\n print(\"------------\")\n # Sort by value\n \"\"\"\n def sort_by(e):\n return e[1]\n\n sorted_count = sorted(count.items(), key=sort_by)\n \"\"\"\n sorted_count = sorted(count.items(), key=lambda e: e[1]) # Same as above\n\n for k, v in sorted_count:\n print(f\"{v:>3}: {k}\")\n\n\ncounter2()",
"{1: 70, 6: 280, 7: 350, 9: 140, 5: 280, 3: 210, 8: 350, 4: 210}\n[(1, 70), (6, 280), (7, 350), (9, 140), (5, 280), (3, 210), (8, 350), (4, 210)]\n1: 70\n3: 210\n4: 210\n5: 280\n6: 280\n7: 350\n8: 350\n9: 140\n------------\n 70: 1\n140: 9\n210: 3\n210: 4\n280: 6\n280: 5\n350: 7\n350: 8\n"
],
[
"d = {}\n\nd[\"hi\"] = 12\nd[\"hi\"] = 22\n\nprint(d[\"hi\"])",
"22\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79b1f7802aa935b86d128f7a598164007334ec5 | 1,091 | ipynb | Jupyter Notebook | 8-Labs/Lab27/Lab27-TH.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | 8-Labs/Lab27/Lab27-TH.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | 8-Labs/Lab27/Lab27-TH.ipynb | dustykat/engr-1330-psuedo-course | 3e7e31a32a1896fcb1fd82b573daa5248e465a36 | [
"CC0-1.0"
] | null | null | null | 20.203704 | 87 | 0.530706 | [
[
[
"# Lab 27\n\nProject Workshop - no formal lab exercise S2022\n\n- Choose Team\n- Decide how to meet to work on project\n- Choose a client manager (like a team leader, but one that does actual work!)\n- Make an effort report form\n- Choose a project\n\n## Effort Report Form\n\nBuild one for your team, something like:\n\n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e79b4b98804cc172070d9b047e5d0aaf7ce9e61b | 331,884 | ipynb | Jupyter Notebook | nd101/p1-bike-sharing/DLND Your first neural network.ipynb | julianogalgaro/udacity | 3b5004669dc5070b5833990b4424939ddf2e0ea7 | [
"MIT"
] | null | null | null | nd101/p1-bike-sharing/DLND Your first neural network.ipynb | julianogalgaro/udacity | 3b5004669dc5070b5833990b4424939ddf2e0ea7 | [
"MIT"
] | 4 | 2020-03-30T21:15:52.000Z | 2020-03-30T21:15:57.000Z | nd101/p1-bike-sharing/DLND Your first neural network.ipynb | julianogalgaro/udacity | 3b5004669dc5070b5833990b4424939ddf2e0ea7 | [
"MIT"
] | null | null | null | 337.280488 | 161,968 | 0.900559 | [
[
[
"# Your first neural network\n\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.\n\n",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Load and prepare the data\n\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!",
"_____no_output_____"
]
],
[
[
"data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)",
"_____no_output_____"
],
[
"rides.head()",
"_____no_output_____"
]
],
[
[
"## Checking out the data\n\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.\n\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.",
"_____no_output_____"
]
],
[
[
"rides[:24*10].plot(x='dteday', y='cnt')",
"_____no_output_____"
]
],
[
[
"### Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.",
"_____no_output_____"
]
],
[
[
"dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()",
"_____no_output_____"
]
],
[
[
"### Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\n\nThe scaling factors are saved so we can go backwards when we use the network for predictions.",
"_____no_output_____"
]
],
[
[
"quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std",
"_____no_output_____"
]
],
[
[
"### Splitting the data into training, testing, and validation sets\n\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.",
"_____no_output_____"
]
],
[
[
"# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]",
"_____no_output_____"
]
],
[
[
"We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).",
"_____no_output_____"
]
],
[
[
"# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]",
"_____no_output_____"
]
],
[
[
"## Time to build the network\n\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.\n\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.\n\n> **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.\n2. Implement the forward pass in the `train` method.\n3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.\n4. Implement the forward pass in the `run` method.\n ",
"_____no_output_____"
]
],
[
[
"class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.input_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, \n (self.output_nodes, self.hidden_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n \n def train(self, inputs_list, targets_list):\n # Convert inputs list to 2d array\n inputs = np.array(inputs_list, ndmin=2).T\n targets = np.array(targets_list, ndmin=2).T\n \n \n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = np.dot(self.weights_input_to_hidden,inputs) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n \n # TODO: Output error - Replace 
this value with your calculations.\n output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.\n \n # TODO: Backpropagated error - Replace these values with your calculations.\n hidden_errors = np.dot(output_errors,self.weights_hidden_to_output)\n hidden_grad = hidden_outputs * (1.0 - hidden_outputs) # hidden layer gradients\n hidden_error_term = hidden_grad * hidden_errors.T\n \n # TODO: Update the weights - Replace these values with your calculations.\n self.weights_hidden_to_output += self.lr * output_errors * hidden_outputs.T # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += self.lr * hidden_error_term * inputs.T # update input-to-hidden weights with gradient descent step\n \n \n def run(self, inputs_list):\n # Run a forward pass through the network\n inputs = np.array(inputs_list, ndmin=2).T\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer\n hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs) # signals into final output layer\n final_outputs = final_inputs # signals from final output layer \n \n return final_outputs",
"_____no_output_____"
],
[
"def MSE(y, Y):\n return np.mean((y-Y)**2)",
"_____no_output_____"
]
],
[
[
"## Training the network\n\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\n\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\n\n### Choose the number of epochs\nThis is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.\n\n### Choose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\n\n### Choose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. 
If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.",
"_____no_output_____"
]
],
[
[
"import sys\n\n### Set the hyperparameters here ###\nepochs = 1500\nlearning_rate = 0.01\nhidden_nodes = 8\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor e in range(epochs):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n for record, target in zip(train_features.ix[batch].values, \n train_targets.ix[batch]['cnt']):\n network.train(record, target)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features), train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features), val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: \" + str(100 * e/float(epochs))[:4] \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)",
"Progress: 99.9% ... Training loss: 0.067 ... Validation loss: 0.165"
],
[
"plt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\nplt.ylim(ymax=1)",
"_____no_output_____"
]
],
[
[
"## Check out your predictions\n\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features)*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)",
"_____no_output_____"
]
],
[
[
"## OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\n \nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\n> **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter\n\n#### Your answer below\n\nThe model predicted as well a good part of the results, but by the end of the year it probably missed the holidays.",
"_____no_output_____"
],
[
"## Unit tests\n\nRun these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.",
"_____no_output_____"
]
],
[
[
"import unittest\n\ninputs = [0.5, -0.2, 0.1]\ntargets = [0.4]\ntest_w_i_h = np.array([[0.1, 0.4, -0.3], \n [-0.2, 0.5, 0.2]])\ntest_w_h_o = np.array([[0.3, -0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n \n \n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328, -0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, 0.39775194, -0.29887597],\n [-0.20185996, 0.50074398, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)",
".....\n----------------------------------------------------------------------\nRan 5 tests in 0.014s\n\nOK\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e79b4be8004b762b78387828ff66882f0aa63e73 | 15,444 | ipynb | Jupyter Notebook | fashion-mnist-classifier.ipynb | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | fashion-mnist-classifier.ipynb | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | fashion-mnist-classifier.ipynb | akapoorx00/machinelearning-stuff | 53184019b77d3387fd15b13d3bfa75529b8ed003 | [
"Apache-2.0"
] | null | null | null | 42.081744 | 5,924 | 0.683113 | [
[
[
"import numpy as np\nimport keras\nimport torchvision\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom keras.layers import *\nfrom keras import *\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"# Loading data\n(features_train, labels_train), (features_test, labels_test) = keras.datasets.fashion_mnist.load_data()",
"_____no_output_____"
],
[
"print (\"Shape of train data: \", features_train.shape)\nprint (\"Shape of test data: \", features_test.shape)\n",
"Shape of train data: (60000, 28, 28)\nShape of test data: (10000, 28, 28)\n"
],
[
"# Building model\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n self.mp = nn.MaxPool2d(kernel_size=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=4)\n self.mp2 = nn.MaxPool2d(kernel_size=3)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=5)\n self.fc1 = nn.Linear(100, 100)\n self.fc2 = nn.Linear(100,10)\n def forward(self, x):\n out = F.relu(self.conv1(x))\n out = self.mp(out)\n out = F.relu(self.conv2(out))\n out = self.mp2(out)\n out = F.relu(self.conv3(out))\n out = out.view(x.size(0),-1)\n out = F.relu(self.fc1(out))\n out = F.softmax(self.fc2(out), dim=1)\n return out",
"_____no_output_____"
],
[
"net = Net()\noptimizer = torch.optim.Adam(net.parameters())\nloss = nn.NLLLoss()",
"_____no_output_____"
],
[
"x_train = Variable(torch.from_numpy(features_train)).float()\nx_test = Variable(torch.from_numpy(features_test)).float()",
"_____no_output_____"
],
[
"x_train = x_train.unsqueeze(1)",
"_____no_output_____"
],
[
"features_train = features_train.reshape(-1, 1, 28,28)\nlabels_train = utils.to_categorical(labels_train, num_classes=10)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(Conv2D(32, kernel_size=3, input_shape=(28,28,1), activation=\"relu\", data_format='channels_last'))\nmodel.add(MaxPooling2D())\nmodel.add(Conv2D(64, kernel_size=3, activation=\"relu\"))\nmodel.add(Flatten(data_format='channels_last'))\nmodel.add(Dense(100, activation=\"relu\"))\nmodel.add(Dense(10, activation=\"softmax\"))\nmodel.compile(optimizer=\"adam\", loss=keras.losses.categorical_crossentropy, metrics=['acc'])",
"_____no_output_____"
],
[
"model",
"_____no_output_____"
],
[
"model.summary()",
"_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_2 (Conv2D) (None, 32, 26, 26) 17280032 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 16, 13, 26) 0 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 11, 64) 15040 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 9856) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 100) 985700 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 1010 \n=================================================================\nTotal params: 18,281,782\nTrainable params: 18,281,782\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"features_train = features_train.reshape(-1, 28,28, 1)",
"_____no_output_____"
],
[
"features_train.shape\nlabels_train = utils.to_categorical(labels_train, num_classes=10)",
"_____no_output_____"
],
[
"net",
"_____no_output_____"
],
[
"x_train.shape",
"_____no_output_____"
],
[
"x_train/=255",
"_____no_output_____"
],
[
"x_train.shape",
"_____no_output_____"
],
[
"plt.imshow(features_train[8], cmap=\"gray\")",
"_____no_output_____"
],
[
"labels_train[5]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79b4dfa5b0b958490bb76c1d207299840992326 | 224,098 | ipynb | Jupyter Notebook | data/test_data/generate_data.ipynb | fewagner/excess | 5902455f1d5e08f0c375ae8a319f0e8f748300e8 | [
"CC-BY-4.0",
"MIT"
] | 4 | 2021-06-16T16:54:19.000Z | 2022-02-15T15:19:09.000Z | data/test_data/generate_data.ipynb | fewagner/excess | 5902455f1d5e08f0c375ae8a319f0e8f748300e8 | [
"CC-BY-4.0",
"MIT"
] | null | null | null | data/test_data/generate_data.ipynb | fewagner/excess | 5902455f1d5e08f0c375ae8a319f0e8f748300e8 | [
"CC-BY-4.0",
"MIT"
] | 1 | 2021-06-16T17:18:23.000Z | 2021-06-16T17:18:23.000Z | 40.0175 | 198 | 0.494668 | [
[
[
"****\n# Generation of Test Data\n****",
"_____no_output_____"
],
[
"In this notebook we generate some test data for the interactive histogram. We create both unbinned energy values and binned efficiency curves.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%config InlineBackend.figure_formats = ['svg']",
"_____no_output_____"
]
],
[
[
"# Energy Data",
"_____no_output_____"
],
[
"We generate the data randomly sampled from some standard distributions for three exemplary experiments.",
"_____no_output_____"
]
],
[
[
"exp_A = np.concatenate((np.random.exponential(scale=1, size=100000), \n np.random.normal(loc=5,scale=0.2, size=100000)))\nexp_B = np.concatenate((np.random.exponential(scale=0.5, size=50000), \n np.random.uniform(low=0,high=15, size=20000)))\nexp_C = np.concatenate((np.random.exponential(scale=2, size=150000), \n np.random.normal(loc=10,scale=2, size=150000)))\nexp_D = np.concatenate((np.random.exponential(scale=5, size=50000), \n np.random.normal(loc=15,scale=0.7, size=4000)))",
"_____no_output_____"
]
],
[
[
"Lets see how the data looks like.",
"_____no_output_____"
]
],
[
[
"bins = 200\n\nplt.hist(exp_A, bins=200, histtype='step', label='Exeriment A')\nplt.hist(exp_B, bins=200, histtype='step', label='Exeriment B')\nplt.hist(exp_C, bins=200, histtype='step', label='Exeriment C')\nplt.hist(exp_D, bins=200, histtype='step', label='Exeriment D')\nplt.ylabel('Counts')\nplt.xlabel('Energy (keV)')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"We save the data in a simple txt format.",
"_____no_output_____"
]
],
[
[
"np.savetxt('experiment_A.txt', exp_A)\nnp.savetxt('experiment_B.txt', exp_B)\nnp.savetxt('experiment_C.txt', exp_C)",
"_____no_output_____"
]
],
[
[
"For one of the experiments, we save the binned file only.",
"_____no_output_____"
]
],
[
[
"hist_D, bins_D = np.histogram(exp_D, bins=300, range=(0,40))\nnp.savetxt('experiment_D.txt', np.column_stack([bins_D[:-1], bins_D[1:], hist_D]))",
"_____no_output_____"
]
],
[
[
"# Efficiency Data",
"_____no_output_____"
],
[
"We create the efficiency curves on an already binned grid.",
"_____no_output_____"
]
],
[
[
"grid = np.arange(0.002, 20, 0.002)\n\neff_A = (np.ones(grid.shape) - np.exp(-grid))*0.8 + 0.2\neff_B = 0.9*np.ones(grid.shape)\neff_C = (np.sqrt(grid) / np.sqrt(grid[-1]) * 0.7*np.ones(grid.shape))*0.8 + 0.2\neff_D = np.ones(grid.shape)",
"_____no_output_____"
]
],
[
[
"Lets plot the curves.",
"_____no_output_____"
]
],
[
[
"plt.plot(eff_A, label='Efficiency A')\nplt.plot(eff_B, label='Efficiency B')\nplt.plot(eff_C, label='Efficiency C')\nplt.plot(eff_D, label='Efficiency D')\nplt.xlabel('Energy (keV)')\nplt.ylabel('Survival Probability')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Now lets plot the re-weighted histogram.",
"_____no_output_____"
]
],
[
[
"# put the exposures\n\nexposure_A = 1\nexposure_B = 0.2\nexposure_C = 15\nexposure_D = np.random.uniform(size=len(hist_D)) + 1\n\n# make histograms\n\nhist_A, bins_A = np.histogram(exp_A, bins)\nhist_B, bins_B = np.histogram(exp_B, bins)\nhist_C, bins_C = np.histogram(exp_C, bins)\n\n# reweight with efficiencies\n\nhist_A = hist_A / np.interp(bins_A[:-1], grid, eff_A)\nhist_B = hist_B / np.interp(bins_B[:-1], grid, eff_B)\nhist_C = hist_C / np.interp(bins_C[:-1], grid, eff_C)\nhist_D = hist_D / np.interp(bins_D[:-1], grid, eff_D)\n\n# plot - comment the lines of experiments to not show them\n\nplt.hist(bins_A[:-1], bins_A, weights=hist_A/exposure_A, histtype='step', label='Experiment A', color='C0')\nplt.hist(bins_B[:-1], bins_B, weights=hist_B/exposure_B, histtype='step', label='Experiment B', color='C1')\nplt.hist(bins_C[:-1], bins_C, weights=hist_C/exposure_C, histtype='step', label='Experiment C', color='C2')\nplt.hist(bins_D[:-1], bins_D, weights=hist_D/exposure_D, histtype='step', label='Experiment D', color='C3')\nplt.xlabel('Energy (keV)')\nplt.ylabel('Counts')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"And save the efficiency curves to files as well.",
"_____no_output_____"
]
],
[
[
"np.savetxt('experiment_A_eff.txt', np.column_stack([grid, eff_A]))\nnp.savetxt('experiment_B_eff.txt', np.column_stack([grid, eff_B]))\nnp.savetxt('experiment_C_eff.txt', np.column_stack([grid, eff_C]))\nnp.savetxt('experiment_D_eff.txt', np.column_stack([grid, eff_D]))",
"_____no_output_____"
]
],
[
[
"Finally, write the exposures to files.",
"_____no_output_____"
]
],
[
[
"np.savetxt('experiment_A_exposure.txt', [exposure_A])\nnp.savetxt('experiment_B_exposure.txt', [exposure_B])\nnp.savetxt('experiment_C_exposure.txt', [exposure_C])\nnp.savetxt('experiment_D_exposure.txt', np.column_stack([(bins_D[1:] - bins_D[:-1])/2 + bins_D[:-1], exposure_D]))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79b6f90cd9a2cf1496261013549bc4a3bc54d45 | 4,613 | ipynb | Jupyter Notebook | Utility_References.ipynb | chakra-ai/DeepNeuralNetworks | a5001dae7b8ff917219490bf6c2e27b2ff19afd3 | [
"MIT"
] | null | null | null | Utility_References.ipynb | chakra-ai/DeepNeuralNetworks | a5001dae7b8ff917219490bf6c2e27b2ff19afd3 | [
"MIT"
] | null | null | null | Utility_References.ipynb | chakra-ai/DeepNeuralNetworks | a5001dae7b8ff917219490bf6c2e27b2ff19afd3 | [
"MIT"
] | null | null | null | 32.258741 | 483 | 0.525471 | [
[
[
"<a href=\"https://colab.research.google.com/github/chakra-ai/DeepNeuralNetworks/blob/master/Utility_References.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# Google Colab Instructions\nfrom google.colab import drive\ndrive.mount('/content/drive')",
"Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n"
],
[
"!ls /content/drive/My\\ Drive/Colab\\ Notebooks",
" 1_0_Python_DataTypes.ipynb NaiveBayesApproach.ipynb\n 1_1_Python_Packages.ipynb PyTorch\n FeedForwardNetwork.ipynb 'Utility References.ipynb'\n"
],
[
"# What version of python do you have?\nimport sys\nimport tensorflow.keras\nimport pandas as pd\nimport sklearn as sk\nimport tensorflow as tf\n\nprint(f\"Python Version: {sys.version}\")\nprint(f\"Tensorflow Version: {tf.__version__}\")\nprint(f\"Keras Version: {tensorflow.keras.__version__}\")\nprint(f\"Scikit-Learn Version: {sk.__version__}\")\nprint(\"GPU is \", \"Available\" if tf.test.is_gpu_available() else \"Not Available\")",
"Python Version: 3.6.9 (default, Apr 18 2020, 01:56:04) \n[GCC 8.4.0]\nTensorflow Version: 2.2.0\nKeras Version: 2.3.0-tf\nScikit-Learn Version: 0.22.2.post1\nWARNING:tensorflow:From <ipython-input-4-e3986e2d6fa2>:12: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.config.list_physical_devices('GPU')` instead.\nGPU is Not Available\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79b7aefab3462e0e7bec7aabee34ab4ff26138e | 121,429 | ipynb | Jupyter Notebook | pandas/06_Stats/Wind_Stats/Solutions.ipynb | eric999j/Udemy_Python_Hand_On | 7a985b3e2c9adfd3648d240af56ac00bb916c3ad | [
"Apache-2.0"
] | 1 | 2020-12-31T18:03:34.000Z | 2020-12-31T18:03:34.000Z | pandas/06_Stats/Wind_Stats/Solutions.ipynb | cntfk2017/Udemy_Python_Hand_On | 52f2a5585bfdea95d893f961c8c21844072e93c7 | [
"Apache-2.0"
] | null | null | null | pandas/06_Stats/Wind_Stats/Solutions.ipynb | cntfk2017/Udemy_Python_Hand_On | 52f2a5585bfdea95d893f961c8c21844072e93c7 | [
"Apache-2.0"
] | 2 | 2019-09-23T14:26:48.000Z | 2020-05-25T07:09:26.000Z | 34.62475 | 347 | 0.30916 | [
[
[
"# Wind Statistics",
"_____no_output_____"
],
[
"### Introduction:\n\nThe data have been modified to contain some missing values, identified by NaN. \nUsing pandas should make this exercise\neasier, in particular for the bonus question.\n\nYou should be able to perform all of these operations without using\na for loop or other looping construct.\n\n\n1. The data in 'wind.data' has the following format:",
"_____no_output_____"
]
],
[
[
"\"\"\"\nYr Mo Dy RPT VAL ROS KIL SHA BIR DUB CLA MUL CLO BEL MAL\n61 1 1 15.04 14.96 13.17 9.29 NaN 9.87 13.67 10.25 10.83 12.58 18.50 15.04\n61 1 2 14.71 NaN 10.83 6.50 12.62 7.67 11.50 10.04 9.79 9.67 17.54 13.83\n61 1 3 18.50 16.88 12.33 10.13 11.17 6.17 11.25 NaN 8.50 7.67 12.75 12.71\n\"\"\"",
"_____no_output_____"
]
],
[
[
" The first three columns are year, month and day. The\n remaining 12 columns are average windspeeds in knots at 12\n locations in Ireland on that day. \n\n More information about the dataset go [here](wind.desc).",
"_____no_output_____"
],
[
"### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport datetime",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://github.com/guipsamora/pandas_exercises/blob/master/Stats/Wind_Stats/wind.data)",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called data and replace the first 3 columns by a proper datetime index.",
"_____no_output_____"
],
[
"### Step 4. Year 2061? Do we really have data from this year? Create a function to fix it and apply it.",
"_____no_output_____"
],
[
"### Step 5. Set the right dates as the index. Pay attention at the data type, it should be datetime64[ns].",
"_____no_output_____"
],
[
"### Step 6. Compute how many values are missing for each location over the entire record. \n#### They should be ignored in all calculations below. ",
"_____no_output_____"
],
[
"### Step 7. Compute how many non-missing values there are in total.",
"_____no_output_____"
],
[
"### Step 8. Calculate the mean windspeeds of the windspeeds over all the locations and all the times.\n#### A single number for the entire dataset.",
"_____no_output_____"
],
[
"### Step 9. Create a DataFrame called loc_stats and calculate the min, max and mean windspeeds and standard deviations of the windspeeds at each location over all the days \n\n#### A different set of numbers for each location.",
"_____no_output_____"
],
[
"### Step 10. Create a DataFrame called day_stats and calculate the min, max and mean windspeed and standard deviations of the windspeeds across all the locations at each day.\n\n#### A different set of numbers for each day.",
"_____no_output_____"
],
[
"### Step 11. Find the average windspeed in January for each location. \n#### Treat January 1961 and January 1962 both as January.",
"_____no_output_____"
],
[
"### Step 12. Downsample the record to a yearly frequency for each location.",
"_____no_output_____"
],
[
"### Step 13. Downsample the record to a monthly frequency for each location.",
"_____no_output_____"
],
[
"### Step 14. Downsample the record to a weekly frequency for each location.",
"_____no_output_____"
],
[
"### Step 15. Calculate the mean windspeed for each month in the dataset. \n#### Treat January 1961 and January 1962 as *different* months.\n#### (hint: first find a way to create an identifier unique for each month.)",
"_____no_output_____"
],
[
"### Step 16. Calculate the min, max and mean windspeeds and standard deviations of the windspeeds across all locations for each week (assume that the first week starts on January 2 1961) for the first 52 weeks.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e79b85237afe1e8ae969b372662c064721f32756 | 48,931 | ipynb | Jupyter Notebook | lessons/Week3-lesson.ipynb | pyladiesams/Bootcamp-Data-Analysis-beginner-apr-may2020 | 8dace9617c9bff16fcc1fca17c18ab15ea3da6d4 | [
"MIT"
] | 24 | 2020-04-25T16:22:26.000Z | 2021-11-14T15:38:13.000Z | lessons/Week3-lesson.ipynb | pyladiesams/Bootcamp-Data-Analysis-beginner-apr-may2020 | 8dace9617c9bff16fcc1fca17c18ab15ea3da6d4 | [
"MIT"
] | null | null | null | lessons/Week3-lesson.ipynb | pyladiesams/Bootcamp-Data-Analysis-beginner-apr-may2020 | 8dace9617c9bff16fcc1fca17c18ab15ea3da6d4 | [
"MIT"
] | 10 | 2020-04-29T23:16:33.000Z | 2021-07-02T11:01:21.000Z | 40.041735 | 3,154 | 0.492387 | [
[
[
"# Agenda\n1. Recap: list and loops // Questions about assignment\n2. List comprehension\n3. Dictionaries\n4. Pandas datatypes\n5. Read data with Pandas\n6. Explore data with Pandas\n7. Work with missing values\n\n\n# List comprehension",
"_____no_output_____"
]
],
[
[
"my_list = ['wordA', 'wordB']\n\n#normal loop\nnew_list1 = []\nfor item in my_list:\n new_list1.append(item.upper())\n\n#list comprehension\nnew_list2 = [item.upper() for item in my_list]\n\nprint(new_list1, new_list2)",
"['WORDA', 'WORDB'] ['WORDA', 'WORDB']\n"
]
],
[
[
"# Python dictionaries\n- Dictionary is a python datatype that is used to store key-value pairs. It enables you to quickly retrieve, add, remove, modify values using a key. Dictionary is very similar to what we call associative array or hash in other languages.\n- {} and seprated by ,\n\nDictionaries and lists share the following characteristics:\n\n- Both are mutable (can be changed)\n- Both are dynamic. They can grow and shrink as needed.\n- Both can be nested. A list can contain another list. A dictionary can contain another dictionary. A dictionary can also contain a list, and vice versa.\n- Dictionaries differ from lists primarily in how elements are accessed:\n\nList elements are accessed by their position in the list, via indexing.\nDictionary elements are accessed via keys.",
"_____no_output_____"
]
],
[
[
"mydict = {\"name\": \"Demi\",\n \"birth_year\": 1994, \n \"hobby\": \"programming\"}",
"_____no_output_____"
],
[
"print(mydict['name'])",
"Demi\n"
],
[
"mydict[0]",
"_____no_output_____"
],
[
"mydict.keys()",
"_____no_output_____"
],
[
"mydict.values()",
"_____no_output_____"
],
[
"for key, value in mydict.items():\n print(key.upper())",
"NAME\nBIRTH_YEAR\nHOBBY\n"
],
[
"for item in mydict.values():\n print(item)",
"Demi\n1994\nprogramming\n"
],
[
"#change a value\nmydict['name'] = \"DeeJay\"",
"_____no_output_____"
],
[
"mydict.items()",
"_____no_output_____"
],
[
"# dictonaries can contain any data type\nmydict = {\"names\": [\"Demi\", \"DeeJay\"],\n \"birth_year\": 1994, \n \"hobby\": [\"programming\", \"yoga\", \"drinking wine\"]}",
"_____no_output_____"
]
],
[
[
"# Exercise\n- Create a dictionary about yourself, list at least 2 hobbies\n- Print only your second hobby\n- What is your birth_year?",
"_____no_output_____"
],
[
"# Pandas\n- Pandas stands for “Python Data Analysis Library\"\n- pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool, it takes data (like a CSV or TSV file, or a SQL database) and creates a Python object with rows and columns called dataframe that looks very similar to table in a statistical software (think Excel or SPSS for example). \n- similar to R\n- pandas is a libary or module, therefore if we want to use it, we need to instal and import it. You can make use of the functions that are defined in the module by calling them with . (dot), like you did with list.split() or string.strip()",
"_____no_output_____"
]
],
[
[
"# Install a conda package in the current Jupyter kernel\nimport sys\n!conda install --yes --prefix {sys.prefix} pandas",
"Collecting package metadata (current_repodata.jsodone\nSolving envidone\n\n## Package Plan ##\n\n environment location: /usr/local/Caskroom/miniconda/base/envs/testj\n\n added / updated specs:\n - pandas\n\n\nThe following NEW packages will be INSTALLED:\n\n blas pkgs/main/osx-64::blas-1.0-mkl\n intel-openmp pkgs/main/osx-64::intel-openmp-2020.1-216\n libgfortran pkgs/main/osx-64::libgfortran-3.0.1-h93005f0_2\n mkl pkgs/main/osx-64::mkl-2019.4-233\n mkl-service pkgs/main/osx-64::mkl-service-2.3.0-py37hfbe908c_0\n mkl_fft pkgs/main/osx-64::mkl_fft-1.0.15-py37h5e564d8_0\n mkl_random pkgs/main/osx-64::mkl_random-1.1.0-py37ha771720_0\n numpy pkgs/main/osx-64::numpy-1.18.1-py37h7241aed_0\n numpy-base pkgs/main/osx-64::numpy-base-1.18.1-py37h6575580_1\n pandas pkgs/main/osx-64::pandas-1.0.3-py37h6c726b0_0\n pytz pkgs/main/noarch::pytz-2020.1-py_0\n\n\nPreparing transaction:done\nVerifying transact| WARNING conda.core.path_actions:verify(963): Unable to create environments file. Path not writable.\n environment location: /Users/alyonagalyeva/.conda/environments.txt\n\ndone\nExecut\\ WARNING conda.core.envs_manager:register_env(52): Unable to register environment. Path not writable or missing.\n environment location: /usr/local/Caskroom/miniconda/base/envs/testj\n registry file: /Users/alyonagalyeva/.conda/environments.txt\ndone\n"
]
],
[
[
"- after the installation we need to import the libary, you need to do import for every Jupyter notebook. \n- `as pd` is an alias, if you do not do 'as' you will have to type pandas everytime. Programmers are lazy, so we use shortcuts such as pd ",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"# Pandas datatypes\nThere are two core objects in Pandas: the DataFrame and the Series.\n\n### Series\nPandas Series is a one-dimensional labeled array, capable of holding data of any type (integer, string, float, python objects, etc.). The axis labels are collectively called index. Pandas Series is nothing, but a column in an Excel sheet. Like in Excel every row in the sheet has \n- an index\n- a value or datapoint (if you entered a value)\n\n<img src=\"data/images/series-index.png\" width=\"300\"/>\n\n**img from: https://codechalleng.es/bites/251/\n\n*** Did we already told you, you can do amazing stuff with markdown? https://about.gitlab.com/handbook/markdown-guide/",
"_____no_output_____"
]
],
[
[
"# assign the variable s to Series\ns = pd.Series(data, index=index)",
"_____no_output_____"
],
[
"# lets define data\ndata = [2,4,6,5]",
"_____no_output_____"
],
[
"# lets try it again\ns = pd.Series(data, index=index)",
"_____no_output_____"
],
[
"# we need to have the same amount of indexes as data points\nmy_index = [0,1,2,3]",
"_____no_output_____"
],
[
"# try to change my_index\npd.Series(data, index=my_index)",
"_____no_output_____"
]
],
[
[
"# Exercise\nHow can you use python functions to define the index? Remember, you're lazy!\n- Hint: \n - Length of the data and the index needs to be the same\n - Have you used the range function before?",
"_____no_output_____"
],
[
"### DataFrame\nA DataFrame is a table. It contains an array of individual entries, each of which has a certain value. Each entry corresponds to a row (or record) and a column.\n- not limited to integers also strings \n\n<img src=\"data/images/Dataframe-expl.png\" width=\"500\"/>\n<img src=\"data/images/Dataframe.png\" width=\"500\"/>\n\n** image from = https://www.geeksforgeeks.org/ and https://www.learndatasci.com/\nFor example, consider the following simple DataFrame",
"_____no_output_____"
]
],
[
[
"df_with_numbers = pd.DataFrame({'Yes': [53, 21], 'No': [13, 1]})\ndf_with_numbers",
"_____no_output_____"
],
[
"pd.DataFrame({'Bob': ['I liked it.', 'It was awful.'], 'Sue': ['Pretty good.', 'Boring.']})",
"_____no_output_____"
]
],
[
[
"# Read data\nBeing able to create a DataFrame or Series manually is handy. But, most of the time, we won't actually create our own data manually. Instead, we'll be working with data that already exists.\n\nData can be stored in any number of different forms and formats. By far the most basic is a CSV file. When you open a CSV file you get something that looks like this:\n\nProduct A,Product B,Product C,\n30,21,9,\n35,34,1,\n41,11,11\n\nDownload data from Kaggle or take a look at this data descriprion:\nhttps://www.kaggle.com/kimjihoo/ds4c-what-is-this-dataset-detailed-description",
"_____no_output_____"
]
],
[
[
"# read the data and store it in df variable\npath = 'data/coronavirusdataset/Case.csv'\ndf = pd.read_csv(path)",
"_____no_output_____"
]
],
[
[
"# Viewing and Inspecting Data\nNow that you’ve loaded your data, it’s time to take a look at it. How does the dataframe look like? Running the name of the data frame would give you the entire table, but you can also use functions",
"_____no_output_____"
]
],
[
[
"# get the first n rows with df.head(n), or the last n rows with df.tail(n)\ndf.head()",
"_____no_output_____"
],
[
"len(df)",
"_____no_output_____"
],
[
"# check the number of rows and columns\ndf.shape",
"_____no_output_____"
],
[
"# important to check non-null values\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 112 entries, 0 to 111\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 case_id 112 non-null int64 \n 1 province 112 non-null object\n 2 city 112 non-null object\n 3 group 112 non-null bool \n 4 infection_case 112 non-null object\n 5 confirmed 112 non-null int64 \n 6 latitude 112 non-null object\n 7 longitude 112 non-null object\ndtypes: bool(1), int64(2), object(5)\nmemory usage: 6.4+ KB\n"
],
[
"# check only the columns\ndf.columns",
"_____no_output_____"
],
[
"df.group\n#df['group']",
"_____no_output_____"
],
[
"df['city'].describe()",
"_____no_output_____"
],
[
"df['province'].unique()",
"_____no_output_____"
],
[
"# view unique values and counts for a series (like a column or a few columns)\ndf['city'].value_counts()",
"_____no_output_____"
]
],
[
[
"# Exercise\n1. How many individual provinces does this dataset contain?\n2. Display the top three MENTIONED provinces",
"_____no_output_____"
],
[
"# Slices",
"_____no_output_____"
]
],
[
[
"df[1:4]",
"_____no_output_____"
],
[
"cases_in_gurogu = df[df.city == 'Guro-gu']\ncases_in_gurogu",
"_____no_output_____"
],
[
"df.confirmed.sum()",
"_____no_output_____"
]
],
[
[
"# Exercise\n1. How many confirmed cases are there in Eunpyeong-gu ?",
"_____no_output_____"
],
[
"# Missing data\nEntries with missing values are given the value NaN, short for \"Not a Number\". For technical reasons these NaN values are always float64 dtype.\n\n### Copying dataframe\nIn Pandas, indexing a DataFrame returns a reference to the initial DataFrame. By changing the subset we change the initial DataFrame. Thus, you'd want to use the copy if you want to make sure the initial DataFrame shouldn't be changed. Consider the following code:",
"_____no_output_____"
]
],
[
[
"# index, column\nmissing_data_df = df.copy()",
"_____no_output_____"
],
[
"missing_data_df",
"_____no_output_____"
],
[
"# create missing values\nmissing_data_df.at[0, 'confirmed'] = None",
"_____no_output_____"
],
[
"missing_data_df",
"_____no_output_____"
]
],
[
[
"Pandas provides some methods specific to manipulating the missing data. To select NaN entries you can use pd.isnull() (or its companion pd.notnull()). ",
"_____no_output_____"
]
],
[
[
"df[pd.isnull(df.city)]\n# df.isnull().values.any()\n# df.info",
"_____no_output_____"
]
],
[
[
"Replacing missing values is a common operation. Pandas provides a really handy method for this problem: fillna~(). \nfillna() provides a few different strategies for mitigating such data. For example, we can simply replace each NaN with an \"Unknown\":",
"_____no_output_____"
]
],
[
[
"# if any non value exsist, fill with unknown\ndf.city.fillna(\"Unknown\")",
"_____no_output_____"
]
],
[
[
"# Exercise\n1. fill the missing values of the confirmed cases with the average of the confirmed cases",
"_____no_output_____"
],
[
"Missing values are not always NaN, they can also be [\"n/a\", \"na\", \"-\", \"\"]. If needed we can also replace these values.",
"_____no_output_____"
]
],
[
[
"# df.latitude\ndf.latitude.unique() # check the - ",
"_____no_output_____"
],
[
"# replace values\ndf.latitude.replace('-', \"unknown\")",
"_____no_output_____"
]
],
[
[
"# Study materials\n- Dictionaries: https://realpython.com/python-dicts/\n- List comprehension: https://www.pythonforbeginners.com/basics/list-comprehensions-in-python\n- Missing values: https://towardsdatascience.com/data-cleaning-with-python-and-pandas-detecting-missing-values-3e9c6ebcf78b\n- Can't wait for more pandas? https://github.com/justmarkham/pandas-videos (do not worry; we will cover most of it next week)\n\n# Next lesson\n- More dataframe exercises and transformations\n- Apply\n- Numpy\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e79b8ff168ca600125c3cf7495d9a674137df3a9 | 29,978 | ipynb | Jupyter Notebook | Intervalo-de-confianca.ipynb | franciscoicmc/simulacao | c16998ceb1c2c26664c043a80a953acfc86c8ae8 | [
"CC0-1.0"
] | 39 | 2020-04-17T18:37:17.000Z | 2022-02-18T10:41:12.000Z | Intervalo-de-confianca.ipynb | franciscoicmc/simulacao | c16998ceb1c2c26664c043a80a953acfc86c8ae8 | [
"CC0-1.0"
] | null | null | null | Intervalo-de-confianca.ipynb | franciscoicmc/simulacao | c16998ceb1c2c26664c043a80a953acfc86c8ae8 | [
"CC0-1.0"
] | 17 | 2020-07-22T01:57:20.000Z | 2022-02-26T11:51:02.000Z | 104.090278 | 21,464 | 0.841784 | [
[
[
"# Intervalos de Confiança",
"_____no_output_____"
],
[
"Francisco A. Rodrigues, University of São Paulo.<br> \nhttps://sites.icmc.usp.br/francisco<br>\[email protected]",
"_____no_output_____"
],
[
"Esse notebook é relacionado à aula: \nhttps://www.youtube.com/watch?v=AkmyfLc-EOs",
"_____no_output_____"
],
[
"Podemos interpretaro intervalo de confiança de $(1-\\alpha)100\\%$ através de simulações. ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nn = 50 # tamanho da amostra\nNs = 100 # numero de intervalos\nmu = 2 # media populacional\nsigma = 2 # desvio padrão populacional\nbeta = 0.95 # nivel de confianca\nzalpha = 1.96 # valor de z (a partir de beta)\nc = 0 # conta o numero de intervalos que contem a media\nplt.figure(figsize=(14,10))\nfor s in range(1,Ns):\n x = np.random.normal(mu, sigma, n) # sorteia uma amostra de tamanho n\n IC1 = np.mean(x) - zalpha*sigma/np.sqrt(n) #intervalo inferior\n IC2 = np.mean(x) + zalpha*sigma/np.sqrt(n) #intervalo superior\n if(mu > IC1 and mu < IC2):\n c = c + 1\n # mostra o intervalo em cinza se continar a media\n plt.vlines(s, ymin=IC1, ymax=IC2, color = 'gray')\n plt.plot(s,np.mean(x), 'o', color = 'gray', \n markersize=5)\n else:\n # mostra o intervalo que nao contem a media\n plt.vlines(s, ymin=IC1, ymax=IC2, color = 'black', linestyles = 'dashed')\n plt.plot(s,np.mean(x), 'o', color = 'black',\n markersize=5)\nplt.axhline(y = mu, color = 'black') # mostra a media populacional\nplt.xlabel('Amostra', fontsize=20)\nplt.show()\nprint('Nível de confiança:', beta)\nprint('Fraçao de intervalos que contém a média:', c/Ns)",
"_____no_output_____"
]
],
[
[
"## Calculo do Intervalo de confiança",
"_____no_output_____"
],
[
"Podemos implementar uma função para calcular o intervalo de confiança automaticamente. ",
"_____no_output_____"
]
],
[
[
"import scipy.stats\nimport numpy as np\n\ndef confident_interval(Xs, n, confidence = 0.95, sigma = -1, s = -1):\n zalpha = abs(scipy.stats.norm.ppf((1 - confidence)/2.))\n if(sigma != -1): # se a variancia eh conhecida\n IC1 = Xs - zalpha*sigma/np.sqrt(n)\n IC2 = Xs + zalpha*sigma/np.sqrt(n)\n else: # se a variancia eh desconhecida\n if(n >= 50): # se o tamanho da amostra eh maior do que 50\n # Usa a distribuicao normal\n IC1 = Xs - zalpha*s/np.sqrt(n)\n IC2 = Xs + zalpha*s/np.sqrt(n)\n else: # se o tamanho da amostra eh menor do que 50\n # Usa a distribuicao t de Student\n talpha = scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n IC1 = Xs - talpha*s/np.sqrt(n)\n IC2 = Xs + talpha*s/np.sqrt(n)\n return [IC1, IC2]",
"_____no_output_____"
]
],
[
[
"**Exemplo**: Em uma empresa de distribuição de alimentos pela internet, verificou-se que o tempo necessário para uma entrega tem distribuição normal com média $\\mu = 30$ minutos e desvio padrão $\\sigma = 10$ minutos. Em uma amostra de 50 entregadores, observou-se um tempo médio de entrega $\\bar{X}_{50} = 25$ minutos. Determine o intervalo de 95\\% de confiança para a média $\\mu$ de todos os entregadores da empresa.",
"_____no_output_____"
]
],
[
[
"Xs = 25\nn = 50\nconfidence =0.95\nsigma = 10\nIC = confident_interval(Xs,n, confidence, sigma)\nprint('Confidence interval:', IC)",
"Confidence interval: [22.228192351300645, 27.771807648699355]\n"
]
],
[
[
"**Exemplo** Em um provedor de videos na Internet, verificou-se que para uma amostra de 15 usuários, o tempo médio de exibição é igual a $\\bar{X}_{15} = 39,3$ minutos e o desvio padrão da amostra $S_{15} = 2,6$ minutos. Encontre um intervalo de 90\\% para a média populacional $\\mu$.",
"_____no_output_____"
]
],
[
[
"Xs = 39.3\ns = 2.6\nn = 15\nconfidence =0.9\n\nIC = confident_interval(Xs,n, confidence, -1, s)\nprint('Confidence interval:', IC)",
"Confidence interval: [38.117602363950525, 40.48239763604947]\n"
]
],
[
[
"Para um conjunto de dados, temos a função abaixo.",
"_____no_output_____"
]
],
[
[
"import scipy.stats\nimport numpy as np\n\ndef confident_interval_data(X, confidence = 0.95, sigma = -1):\n def S(X): #funcao para calcular o desvio padrao amostral\n s = 0\n for i in range(0,len(X)):\n s = s + (X[i] - np.mean(X))**2\n s = np.sqrt(s/(len(X)-1))\n return s\n n = len(X) # numero de elementos na amostra\n Xs = np.mean(X) # media amostral\n s = S(X) # desvio padrao amostral\n zalpha = abs(scipy.stats.norm.ppf((1 - confidence)/2))\n if(sigma != -1): # se a variancia eh conhecida\n IC1 = Xs - zalpha*sigma/np.sqrt(n)\n IC2 = Xs + zalpha*sigma/np.sqrt(n)\n else: # se a variancia eh desconhecida\n if(n >= 50): # se o tamanho da amostra eh maior do que 50\n # Usa a distribuicao normal\n IC1 = Xs - zalpha*s/np.sqrt(n)\n IC2 = Xs + zalpha*s/np.sqrt(n)\n else: # se o tamanho da amostra eh menor do que 50\n # Usa a distribuicao t de Student\n talpha = scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n IC1 = Xs - talpha*s/np.sqrt(n)\n IC2 = Xs + talpha*s/np.sqrt(n)\n return [IC1, IC2]",
"_____no_output_____"
]
],
[
[
"Executando para um exemplo.",
"_____no_output_____"
]
],
[
[
"X = [1, 2, 3, 4, 5]\nconfidence = 0.95\n\nIC = confident_interval_data(X, confidence)\nprint('Confidence interval:', IC)",
"Confidence interval: [1.0367568385224393, 4.9632431614775605]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79b91230f5da6e1fce4db40f16761f7715a4520 | 11,105 | ipynb | Jupyter Notebook | 2-Chapter-2/Test_your_knowledge.ipynb | DiegoMerino28/Practical-Data-Science-with-Python | b05cffac6fe46c2a3cc77b556af262b1a5b7f8a0 | [
"MIT"
] | null | null | null | 2-Chapter-2/Test_your_knowledge.ipynb | DiegoMerino28/Practical-Data-Science-with-Python | b05cffac6fe46c2a3cc77b556af262b1a5b7f8a0 | [
"MIT"
] | null | null | null | 2-Chapter-2/Test_your_knowledge.ipynb | DiegoMerino28/Practical-Data-Science-with-Python | b05cffac6fe46c2a3cc77b556af262b1a5b7f8a0 | [
"MIT"
] | null | null | null | 27.487624 | 328 | 0.54462 | [
[
[
"This notebook will help you practice some of the skills and concepts you learned in chapter 2 of the book:\n- Strings, Numbers\n- Variables\n- Lists, Sets, Dictionaries\n- Loops and list comprehensions\n- Control Flow\n- Functions\n- Classes\n- Packages/Modules\n- Debugging an error\n- Using documentation",
"_____no_output_____"
],
[
"Here we have some data on the number of books read by different people who work at Bob's Book Emporium. Create Python code that loops through each of the people and prints out how many books they have read. If someone has read 0 books, print out \"___ has not read any books!\" instead of the number of books.",
"_____no_output_____"
]
],
[
[
"people = ['Krishnang', 'Steve', 'Jimmy', 'Mary', 'Divya', 'Robert', 'Yulia']\nbooks_read = [12, 6, 0, 7, 4, 10, 15]\nfor i in range(len(people)):\n if books_read[i] == 0:\n print(people[i] + \"has not read any books!\")\n else:\n print(people[i] + \" has read \" + str(books_read[i]) + \" books!\")\n",
"Krishnang has read 12 books!\nSteve has read 6 books!\nJimmyhas not read any books!\nMary has read 7 books!\nDivya has read 4 books!\nRobert has read 10 books!\nYulia has read 15 books!\n"
]
],
[
[
"There are several ways to solve this -- you could look at the `zip()` function, use `enumerate()`, use `range` and `len`, or use other methods. To print the names and values, you can use string concatenation (+), f-string formatting, or other methods.",
"_____no_output_____"
]
],
[
[
"# your code here",
"_____no_output_____"
]
],
[
[
"Turn the loop we just created into a function that takes the two lists (books read and people) as arguments. Be sure to try out your function to make sure it works.",
"_____no_output_____"
]
],
[
[
"def people_books(people, books_read):\n for i in range(len(people)):\n if books_read[i] == 0:\n print(people[i] + \"has not read any books!\")\n else:\n print(people[i] + \" has read \" + str(books_read[i]) + \" books!\")\n\npeople_books(people, books_read)",
"Krishnang has read 12 books!\nSteve has read 6 books!\nJimmyhas not read any books!\nMary has read 7 books!\nDivya has read 4 books!\nRobert has read 10 books!\nYulia has read 15 books!\n"
]
],
[
[
"Challenge: Sort the values of `books_read` from greatest to least and print the top three people with the number of books they have read. This is a tougher problem. Some possible ways to solve it include using NumPy's argsort, creating a dictionary, and creating tuples.",
"_____no_output_____"
]
],
[
[
"new_dict = {}\nfor i in range(len(books_read)):\n new_dict[people[i]] = books_read[i]\n\nsorted_dicctionary = sorted(new_dict.items(), key = lambda x: x[1], reverse=True)\nprint(sorted_dicctionary)\n ",
"[('Yulia', 15), ('Krishnang', 12), ('Robert', 10), ('Mary', 7), ('Steve', 6), ('Divya', 4), ('Jimmy', 0)]\n"
]
],
[
[
"Bob's books gets a discount for every multiple of 3 books their employees buy and read. Find out how many multiples of 3 books they have read, and how many more books need to be read to get to the next multiple of 3. Python has a built-in `sum` function that may be useful here, and don't forget about the modulo operator.",
"_____no_output_____"
]
],
[
[
"sum_books = sum(books_read)\ndiscounted = sum_books//3\nremaining = sum_books % 3",
"_____no_output_____"
]
],
[
[
"Create a dictionary for the data where the keys are people's names and the values are the number of books. An advanced way to do this would be with a dictionary comprehension, but you can also use a loop.",
"_____no_output_____"
]
],
[
[
"# your code here\ndicctionary = {person : books for person,books in zip(people, books_read)}\nprint(dicctionary)",
"{'Krishnang': 12, 'Steve': 6, 'Jimmy': 0, 'Mary': 7, 'Divya': 4, 'Robert': 10, 'Yulia': 15}\n"
]
],
[
[
"Challenge: Use the dictionary to print out the top 3 people with the most books read. This is where Stack Overflow and searching the web might come in handy -- try searching 'sort dictionary by value in Python'.",
"_____no_output_____"
]
],
[
[
"# your code here\nsorted_dicctionary = sorted(dicctionary.items(), key = lambda x:x[1], reverse = True)[:3]\nsorted_dicctionary",
"_____no_output_____"
]
],
[
[
"Using sets, ensure there are no duplicate names in our data. (Yes, this is trivial since our data is small and we can manually inspect it, but if we had thousands of names, we could use the same method as we do here.)",
"_____no_output_____"
]
],
[
[
"set_people = set(people)\nprint(set_people)",
"{'Yulia', 'Robert', 'Steve', 'Mary', 'Divya', 'Jimmy', 'Krishnang'}\n"
]
],
[
[
"Create a class for storing the books read and people's names. The class should also include a function for printing out the top three book readers. Test out your class to make sure it works.",
"_____no_output_____"
]
],
[
[
"class books_people:\n def __init__(self, people, books_read):\n self.people = people\n self.books_read = books_read\n\n def print_top_readers(self):\n book_tuples = ((b,p) for b,p in zip(self.books_read, self.people))\n for b,p in sorted(book_tuples, reverse= True)[:3]:\n print(f'{p} has read {b} books!')\n\nbr = books_people(people, books_read)\nbr.print_top_readers()",
"Yulia has read 15 books!\nKrishnang has read 12 books!\nRobert has read 10 books!\n"
]
],
[
[
"Use the time module to see how long it takes to make a new class and print out the top three readers.",
"_____no_output_____"
]
],
[
[
"import time\nstart = time.time()\nbr=books_people(people, books_read)\nbr.print_top_readers()\n\nelapsed = time.time() - start\nprint(f'It has elapsed {elapsed} seconds')",
"Yulia has read 15 books!\nKrishnang has read 12 books!\nRobert has read 10 books!\nIt has elapsed 0.0005550384521484375 seconds\n"
]
],
[
[
"Another way to do this is with the %%timeit magic command:",
"_____no_output_____"
],
[
"The code below is throwing a few errors. Debug and correct the error so the code runs.",
"_____no_output_____"
]
],
[
[
"for b, p in list(zip(books_read, people))[:3]:\n if b > 0 and b < 10:\n print(p + ' has only read ' + str(b) + ' books')",
"Steve has only read 6 books\n"
]
],
[
[
"Use the documentation (https://docs.python.org/3/library/stdtypes.html#string-methods) to understand how the functions `rjust` and `ljust` work, then modify the loop below so the output looks something like:\n\n```\nKrishnang------12 books\nSteve---------- 6 books\nJimmy---------- 0 books\nMary----------- 7 books\nDivya---------- 4 books\nRobert---------10 books\nYulia----------15 books\n```",
"_____no_output_____"
]
],
[
[
"for b, p in zip(books_read, people):\n print(f'{p.ljust(15, \"-\")}{str(b).rjust(2)} books')",
"Krishnang------12 books\nSteve---------- 6 books\nJimmy---------- 0 books\nMary----------- 7 books\nDivya---------- 4 books\nRobert---------10 books\nYulia----------15 books\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79bb42717c6c3dc5eba5cf3ae60a3a6304621bf | 14,067 | ipynb | Jupyter Notebook | standardiser/docs/Miscellaeny.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | 2 | 2016-03-15T15:27:25.000Z | 2021-07-05T04:53:09.000Z | standardiser/docs/Miscellaeny.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | null | null | null | standardiser/docs/Miscellaeny.ipynb | thesgc/standardiser | 635f2608e70c18dc17fa32d514abafc227ff005e | [
"Apache-2.0"
] | null | null | null | 81.784884 | 2,235 | 0.829317 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e79bdfb0d036359006ae140432166f60fb3254fe | 577,796 | ipynb | Jupyter Notebook | notebooks/analyses_reports/2019-03-23_to_03-27_ab4_llr_i_loved.ipynb | alphagov/govuk_ab_analysis | fec954d9c90be09e1a74ced64551c2eb68b05d56 | [
"MIT"
] | 9 | 2019-02-04T08:45:50.000Z | 2021-04-22T04:08:49.000Z | notebooks/analyses_reports/2019-03-23_to_03-27_ab4_llr_i_loved.ipynb | ukgovdatascience/govuk_ab_analysis | 26e24f38b2811eb0f25d9cd97dbd1732823dbc4c | [
"MIT"
] | 18 | 2019-02-04T14:32:33.000Z | 2019-06-12T10:08:35.000Z | notebooks/analyses_reports/2019-03-23_to_03-27_ab4_llr_i_loved.ipynb | alphagov/govuk_ab_analysis | fec954d9c90be09e1a74ced64551c2eb68b05d56 | [
"MIT"
] | 1 | 2021-04-11T08:56:05.000Z | 2021-04-11T08:56:05.000Z | 219.694297 | 108,172 | 0.910626 | [
[
[
"# A/B test 4 - loved journeys, control vs LLR\n\nThis related links B/C test (ab4) was conducted from 22nd-28th March 2019.\n\nThe data used in this report are 23rd-27th Mar 2019 because the test was started partway through 22nd Ma, and ended partway through 28th Mar.\n\nThe test compared the existing related links (where available) to links generated using LLR algorithm ",
"_____no_output_____"
],
[
"## Import",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n\nimport os \nimport pandas as pd\nimport numpy as np\nimport ast\nimport re\n\n# z test\nfrom statsmodels.stats.proportion import proportions_ztest\n\n# bayesian bootstrap and vis\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport bayesian_bootstrap.bootstrap as bb\nfrom astropy.utils import NumpyRNGContext\n\n# progress bar\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom scipy import stats\nfrom collections import Counter\n\nimport sys\nsys.path.insert(0, '../../src' )\nimport analysis as analysis",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"# set up the style for our plots\nsns.set(style='white', palette='colorblind', font_scale=1.3,\n rc={'figure.figsize':(12,9), \n \"axes.facecolor\": (0, 0, 0, 0)})\n\n# instantiate progress bar goodness\ntqdm.pandas(tqdm_notebook)\n\npd.set_option('max_colwidth',500)\n\n# the number of bootstrap means used to generate a distribution\nboot_reps = 10000\n\n# alpha - false positive rate\nalpha = 0.05\n# number of tests\nm = 4\n# Correct alpha for multiple comparisons\nalpha = alpha / m\n\n# The Bonferroni correction can be used to adjust confidence intervals also. \n# If one establishes m confidence intervals, and wishes to have an overall confidence level of 1-alpha,\n# each individual confidence interval can be adjusted to the level of 1-(alpha/m).\n\n# reproducible\nseed = 1337",
"_____no_output_____"
]
],
[
[
"## File/dir locations\n### Processed journey data",
"_____no_output_____"
]
],
[
[
"DATA_DIR = os.getenv(\"DATA_DIR\")\nfilename = \"full_sample_loved_947858.csv.gz\"\nfilepath = os.path.join(\n DATA_DIR, \"sampled_journey\", \"20190323_20190327\",\n filename)\nfilepath",
"_____no_output_____"
],
[
"CONTROL_GROUP = \"B\"\nINTERVENTION_GROUP = \"C\"\n\nVARIANT_DICT = {\n 'CONTROL_GROUP':'B',\n 'INTERVENTION_GROUP':'C'\n}",
"_____no_output_____"
],
[
"# read in processed sampled journey with just the cols we need for related links\ndf = pd.read_csv(filepath, sep =\"\\t\", compression=\"gzip\")\n# convert from str to list\ndf['Event_cat_act_agg']= df['Event_cat_act_agg'].progress_apply(ast.literal_eval)\ndf['Page_Event_List'] = df['Page_Event_List'].progress_apply(ast.literal_eval)\ndf['Page_List'] = df['Page_List'].progress_apply(ast.literal_eval)",
"100%|██████████| 772387/772387 [00:58<00:00, 13120.96it/s]\n100%|██████████| 772387/772387 [01:14<00:00, 10318.75it/s]\n100%|██████████| 772387/772387 [00:14<00:00, 51676.71it/s]\n"
],
[
"# drop dodgy rows, where page variant is not A or B.\ndf = df.query('ABVariant in [@CONTROL_GROUP, @INTERVENTION_GROUP]')",
"_____no_output_____"
],
[
"df[['Occurrences', 'ABVariant']].groupby('ABVariant').sum()",
"_____no_output_____"
],
[
"df['Page_List_Length'] = df['Page_List'].progress_apply(len)\n",
"100%|██████████| 772387/772387 [00:00<00:00, 786616.91it/s]\n"
]
],
[
[
"### Nav type of page lookup - is it a finding page? if not it's a thing page",
"_____no_output_____"
]
],
[
[
"filename = \"document_types.csv.gz\"\n\n# created a metadata dir in the DATA_DIR to hold this data\nfilepath = os.path.join(\n DATA_DIR, \"metadata\",\n filename)\nprint(filepath)\n\ndf_finding_thing = pd.read_csv(filepath, sep=\"\\t\", compression=\"gzip\")\n\ndf_finding_thing.head()",
"/Users/suganyasivaskantharajah/code/govuk_ab_analysis/data/metadata/document_types.csv.gz\n"
],
[
"thing_page_paths = df_finding_thing[\n df_finding_thing['is_finding']==0]['pagePath'].tolist()\n\n\nfinding_page_paths = df_finding_thing[\n df_finding_thing['is_finding']==1]['pagePath'].tolist()",
"_____no_output_____"
]
],
[
[
"## Outliers\nSome rows should be removed before analysis. For example rows with journey lengths of 500 or very high related link click rates. This process might have to happen once features have been created.",
"_____no_output_____"
],
[
"# Derive variables",
"_____no_output_____"
],
[
"## journey_click_rate\nThere is no difference in the proportion of journeys using at least one related link (journey_click_rate) between page variant A and page variant B.\n\n",
"_____no_output_____"
],
[
"\\begin{equation*}\n\\frac{\\text{total number of journeys including at least one click on a related link}}{\\text{total number of journeys}}\n\\end{equation*}",
"_____no_output_____"
]
],
[
[
"# get the number of related links clicks per Sequence\ndf['Related Links Clicks per seq'] = df['Event_cat_act_agg'].map(analysis.sum_related_click_events)",
"_____no_output_____"
],
[
"# map across the Sequence variable, which includes pages and Events\n# we want to pass all the list elements to a function one-by-one and then collect the output.\ndf[\"Has_Related\"] = df[\"Related Links Clicks per seq\"].map(analysis.is_related)\n\ndf['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']\n",
"_____no_output_____"
],
[
"df.head(3)",
"_____no_output_____"
]
],
[
[
"## count of clicks on navigation elements\n\nThere is no statistically significant difference in the count of clicks on navigation elements per journey between page variant A and page variant B.\n\n\\begin{equation*}\n{\\text{total number of navigation element click events from content pages}}\n\\end{equation*}",
"_____no_output_____"
],
[
"### Related link counts",
"_____no_output_____"
]
],
[
[
"# get the total number of related links clicks for that row (clicks per sequence multiplied by occurrences)\ndf['Related Links Clicks row total'] = df['Related Links Clicks per seq'] * df['Occurrences']",
"_____no_output_____"
]
],
[
[
"### Navigation events",
"_____no_output_____"
]
],
[
[
"def count_nav_events(page_event_list):\n \"\"\"Counts the number of nav events from a content page in a Page Event List.\"\"\"\n content_page_nav_events = 0\n for pair in page_event_list:\n if analysis.is_nav_event(pair[1]):\n if pair[0] in thing_page_paths:\n content_page_nav_events += 1\n return content_page_nav_events",
"_____no_output_____"
],
[
"# needs finding_thing_df read in from document_types.csv.gz\ndf['Content_Page_Nav_Event_Count'] = df['Page_Event_List'].progress_map(count_nav_events)",
"100%|██████████| 772387/772387 [17:44<00:00, 725.28it/s] \n"
],
[
"def count_search_from_content(page_list):\n search_from_content = 0\n for i, page in enumerate(page_list):\n if i > 0:\n if '/search?q=' in page:\n if page_list[i-1] in thing_page_paths:\n search_from_content += 1\n return search_from_content",
"_____no_output_____"
],
[
"df['Content_Search_Event_Count'] = df['Page_List'].progress_map(count_search_from_content)",
"100%|██████████| 772387/772387 [16:03:14<00:00, 13.36it/s] \n"
],
[
"# count of nav or search clicks\ndf['Content_Nav_or_Search_Count'] = df['Content_Page_Nav_Event_Count'] + df['Content_Search_Event_Count']\n# occurrences is accounted for by the group by bit in our bayesian boot analysis function\ndf['Content_Nav_Search_Event_Sum_row_total'] = df['Content_Nav_or_Search_Count'] * df['Occurrences']\n# required for journeys with no nav later\ndf['Has_No_Nav_Or_Search'] = df['Content_Nav_Search_Event_Sum_row_total'] == 0",
"_____no_output_____"
]
],
[
[
"## Temporary df file in case of crash\n### Save",
"_____no_output_____"
]
],
[
[
"df.to_csv(os.path.join(\n DATA_DIR, \n \"ab3_loved_temp.csv.gz\"), sep=\"\\t\", compression=\"gzip\", index=False)",
"_____no_output_____"
],
[
"df = pd.read_csv(os.path.join(\n DATA_DIR, \n \"ab3_loved_temp.csv.gz\"), sep=\"\\t\", compression=\"gzip\")",
"_____no_output_____"
]
],
[
[
"### Frequentist statistics",
"_____no_output_____"
],
[
"#### Statistical significance",
"_____no_output_____"
]
],
[
[
"# help(proportions_ztest)",
"_____no_output_____"
],
[
"has_rel = analysis.z_prop(df, 'Has_Related', VARIANT_DICT)\nhas_rel",
"_____no_output_____"
],
[
"has_rel['p-value'] < alpha",
"_____no_output_____"
]
],
[
[
"#### Practical significance - uplift",
"_____no_output_____"
]
],
[
[
"# Due to multiple testing we used the Bonferroni correction for alpha\nci_low,ci_upp = analysis.zconf_interval_two_samples(has_rel['x_a'], has_rel['n_a'],\n has_rel['x_b'], has_rel['n_b'], alpha = alpha)\nprint(' difference in proportions = {0:.2f}%'.format(100*(has_rel['p_b']-has_rel['p_a'])))\nprint(' % relative change in proportions = {0:.2f}%'.format(100*((has_rel['p_b']-has_rel['p_a'])/has_rel['p_a'])))\nprint(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'\n .format(100*ci_low, 100*ci_upp))",
" difference in proportions = 2.20%\n % relative change in proportions = 62.91%\n 95% Confidence Interval = ( 2.12% , 2.28% )\n"
]
],
[
[
"### Bayesian statistics ",
"_____no_output_____"
],
[
"Based on [this](https://medium.com/@thibalbo/coding-bayesian-ab-tests-in-python-e89356b3f4bd) blog",
"_____no_output_____"
],
[
"To be developed, a Bayesian approach can provide a simpler interpretation.",
"_____no_output_____"
],
[
"### Bayesian bootstrap",
"_____no_output_____"
]
],
[
[
"analysis.compare_total_searches(df, VARIANT_DICT)",
"total searches in control group = 55162\ntotal searches in intervention group = 50942\nintervention has 4220 fewer navigation or searches than control;\na 3.98% overall difference\nThe relative change was -7.65% from control to intervention\n"
],
[
"fig, ax = plt.subplots()\nplot_df_B = df[df.ABVariant == VARIANT_DICT['INTERVENTION_GROUP']].groupby(\n 'Content_Nav_or_Search_Count').sum().iloc[:, 0]\nplot_df_A = df[df.ABVariant == VARIANT_DICT['CONTROL_GROUP']].groupby(\n 'Content_Nav_or_Search_Count').sum().iloc[:, 0]\n\nax.set_yscale('log')\nwidth =0.4\nax = plot_df_B.plot.bar(label='B', position=1, width=width)\nax = plot_df_A.plot.bar(label='A', color='salmon', position=0, width=width)\nplt.title(\"loved journeys\")\nplt.ylabel(\"Log(number of journeys)\")\nplt.xlabel(\"Number of uses of search/nav elements in journey\")\n\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.savefig('nav_counts_loved_bar.png', dpi = 900, bbox_inches = 'tight')",
"_____no_output_____"
],
[
"a_bootstrap, b_bootstrap = analysis.bayesian_bootstrap_analysis(df, col_name='Content_Nav_or_Search_Count', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)",
"_____no_output_____"
],
[
"np.array(a_bootstrap).mean()",
"_____no_output_____"
],
[
"np.array(a_bootstrap).mean() - (0.05 * np.array(a_bootstrap).mean())",
"_____no_output_____"
],
[
"np.array(b_bootstrap).mean()",
"_____no_output_____"
],
[
"print(\"A relative change of {0:.2f}% from control to intervention\".format((np.array(b_bootstrap).mean()-np.array(a_bootstrap).mean())/np.array(a_bootstrap).mean()*100))",
"A relative change of -7.66% from control to intervention\n"
],
[
"# ratio is vestigial but we keep it here for convenience\n# it's actually a count but considers occurrences\nratio_stats = analysis.bb_hdi(a_bootstrap, b_bootstrap, alpha=alpha)\nratio_stats",
"_____no_output_____"
],
[
"ax = sns.distplot(b_bootstrap, label='B')\nax.errorbar(x=[ratio_stats['b_ci_low'], ratio_stats['b_ci_hi']], y=[2, 2], linewidth=5, c='teal', marker='o', \n label='95% HDI B')\n\nax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')\nax.errorbar(x=[ratio_stats['a_ci_low'], ratio_stats['a_ci_hi']], y=[5, 5], linewidth=5, c='salmon', marker='o', \n label='95% HDI A')\n\nax.set(xlabel='mean search/nav count per journey', ylabel='Density')\nsns.despine()\nlegend = plt.legend(frameon=True, bbox_to_anchor=(0.75, 1), loc='best')\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.title(\"loved journeys\")\n\nplt.savefig('nav_counts_loved.png', dpi = 900, bbox_inches = 'tight')",
"_____no_output_____"
],
[
"# calculate the posterior for the difference between A's and B's ratio\n# ypa prefix is vestigial from blog post\nypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)\n# get the hdi\nypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)\n\n# the mean of the posterior\nprint('mean:', ypa_diff.mean())\n\nprint('low ci:', ypa_diff_ci_low, '\\nhigh ci:', ypa_diff_ci_hi)",
"mean: -0.004457367613877534\nlow ci: -0.005355436557732962 \nhigh ci: -0.003570505635536597\n"
],
[
"ax = sns.distplot(ypa_diff)\nax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o', \n label='95% HDI')\nax.set(xlabel='Content_Nav_or_Search_Count', ylabel='Density', \n title='The difference between B\\'s and A\\'s mean counts times occurrences')\nsns.despine()\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.show();",
"_____no_output_____"
],
[
"# We count the number of values greater than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# greater than 0, could act a bit like a p-value\n(ypa_diff > 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
],
[
"# We count the number of values less than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# less than 0, could act a bit like a p-value\n(ypa_diff < 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
],
[
"(ypa_diff>0).sum()",
"_____no_output_____"
],
[
"(ypa_diff<0).sum()",
"_____no_output_____"
]
],
[
[
"## proportion of journeys with a page sequence including content and related links only\n\nThere is no statistically significant difference in the proportion of journeys with a page sequence including content and related links only (including loops) between page variant A and page variant B",
"_____no_output_____"
],
[
"\\begin{equation*}\n\\frac{\\text{total number of journeys that only contain content pages and related links (i.e. no nav pages)}}{\\text{total number of journeys}}\n\\end{equation*}",
"_____no_output_____"
],
[
"### Overall",
"_____no_output_____"
]
],
[
[
"# if (Content_Nav_Search_Event_Sum == 0) that's our success\n# Has_No_Nav_Or_Search == 1 is a success\n# the problem is symmetrical so doesn't matter too much\nsum(df.Has_No_Nav_Or_Search * df.Occurrences) / df.Occurrences.sum()",
"_____no_output_____"
],
[
"sns.distplot(df.Content_Nav_or_Search_Count.values);",
"_____no_output_____"
]
],
[
[
"### Frequentist statistics\n#### Statistical significance",
"_____no_output_____"
]
],
[
[
"nav = analysis.z_prop(df, 'Has_No_Nav_Or_Search', VARIANT_DICT)\nnav",
"_____no_output_____"
]
],
[
[
"#### Practical significance - uplift",
"_____no_output_____"
]
],
[
[
"# Due to multiple testing we used the Bonferroni correction for alpha\nci_low,ci_upp = analysis.zconf_interval_two_samples(nav['x_a'], nav['n_a'],\n nav['x_b'], nav['n_b'], alpha = alpha)\ndiff = 100*(nav['x_b']/nav['n_b']-nav['x_a']/nav['n_a'])\nprint(' difference in proportions = {0:.2f}%'.format(diff))\nprint(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'\n .format(100*ci_low, 100*ci_upp))",
" difference in proportions = 0.28%\n 95% Confidence Interval = ( 0.20% , 0.35% )\n"
],
[
"print(\"There was a {0: .2f}% relative change in the proportion of journeys not using search/nav elements\".format(100 * ((nav['p_b']-nav['p_a'])/nav['p_a'])))",
"There was a 0.29% relative change in the proportion of journeys not using search/nav elements\n"
]
],
[
[
"## Average Journey Length (number of page views)\nThere is no statistically significant difference in the average page list length of journeys (including loops) between page variant A and page variant B.",
"_____no_output_____"
]
],
[
[
"length_B = df[df.ABVariant == VARIANT_DICT['INTERVENTION_GROUP']].groupby(\n 'Page_List_Length').sum().iloc[:, 0]\nlengthB_2 = length_B.reindex(np.arange(1, 501, 1), fill_value=0)\n\nlength_A = df[df.ABVariant == VARIANT_DICT['CONTROL_GROUP']].groupby(\n 'Page_List_Length').sum().iloc[:, 0]\nlengthA_2 = length_A.reindex(np.arange(1, 501, 1), fill_value=0)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(100, 30))\n\nax.set_yscale('log')\nwidth = 0.4\nax = lengthB_2.plot.bar(label='B', position=1, width=width)\nax = lengthA_2.plot.bar(label='A', color='salmon', position=0, width=width)\nplt.xlabel('length', fontsize=1)\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.show();",
"_____no_output_____"
]
],
[
[
"### Bayesian bootstrap for non-parametric hypotheses",
"_____no_output_____"
]
],
[
[
"# http://savvastjortjoglou.com/nfl-bayesian-bootstrap.html",
"_____no_output_____"
],
[
"# let's use mean journey length (could probably model parametrically but we use it for demonstration here)\n# some journeys have length 500 and should probably be removed as they are likely bots or other weirdness",
"_____no_output_____"
],
[
"#exclude journeys of longer than 500 as these could be automated traffic",
"_____no_output_____"
],
[
"df_short = df[df['Page_List_Length'] < 500]",
"_____no_output_____"
],
[
"print(\"The mean number of pages in an loved journey is {0:.3f}\".format(sum(df.Page_List_Length*df.Occurrences)/df.Occurrences.sum()))",
"The mean number of pages in an loved journey is 2.948\n"
],
[
"# for reproducibility, set the seed within this context\na_bootstrap, b_bootstrap = analysis.bayesian_bootstrap_analysis(df, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)\na_bootstrap_short, b_bootstrap_short = analysis.bayesian_bootstrap_analysis(df_short, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)",
"_____no_output_____"
],
[
"np.array(a_bootstrap).mean()",
"_____no_output_____"
],
[
"np.array(b_bootstrap).mean()",
"_____no_output_____"
],
[
"print(\"There's a relative change in page length of {0:.2f}% from A to B\".format((np.array(b_bootstrap).mean()-np.array(a_bootstrap).mean())/np.array(a_bootstrap).mean()*100))",
"There's a relative change in page length of 1.12% from A to B\n"
],
[
"print(np.array(a_bootstrap_short).mean())\nprint(np.array(b_bootstrap_short).mean())",
"2.9317573438523623\n2.964725520570392\n"
],
[
"# Calculate a 95% HDI\na_ci_low, a_ci_hi = bb.highest_density_interval(a_bootstrap)\nprint('low ci:', a_ci_low, '\\nhigh ci:', a_ci_hi)",
"low ci: 2.9231442449823506 \nhigh ci: 2.9403731110159375\n"
],
[
"ax = sns.distplot(a_bootstrap, color='salmon')\nax.plot([a_ci_low, a_ci_hi], [0, 0], linewidth=10, c='k', marker='o', \n label='95% HDI')\nax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant A Mean Journey Length')\nsns.despine()\nplt.legend();",
"_____no_output_____"
],
[
"# Calculate a 95% HDI\nb_ci_low, b_ci_hi = bb.highest_density_interval(b_bootstrap)\nprint('low ci:', b_ci_low, '\\nhigh ci:', b_ci_hi)",
"low ci: 2.955781152370974 \nhigh ci: 2.9736959195222066\n"
],
[
"ax = sns.distplot(b_bootstrap)\nax.plot([b_ci_low, b_ci_hi], [0, 0], linewidth=10, c='k', marker='o', \n label='95% HDI')\nax.set(xlabel='Journey Length', ylabel='Density', title='Page Variant B Mean Journey Length')\nsns.despine()\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.show();",
"_____no_output_____"
],
[
"ax = sns.distplot(b_bootstrap, label='B')\nax = sns.distplot(a_bootstrap, label='A', ax=ax, color='salmon')\nax.set(xlabel='Journey Length', ylabel='Density')\nsns.despine()\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.title(\"loved journeys\")\n\nplt.savefig('journey_length_loved.png', dpi = 900, bbox_inches = 'tight')",
"_____no_output_____"
],
[
"ax = sns.distplot(b_bootstrap_short, label='B')\nax = sns.distplot(a_bootstrap_short, label='A', ax=ax, color='salmon')\nax.set(xlabel='Journey Length', ylabel='Density')\nsns.despine()\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.show();",
"_____no_output_____"
]
],
[
[
"We can also measure the uncertainty in the difference between the Page Variants's Journey Length by subtracting their posteriors.\n\n",
"_____no_output_____"
]
],
[
[
"# calculate the posterior for the difference between A's and B's YPA\nypa_diff = np.array(b_bootstrap) - np.array(a_bootstrap)\n# get the hdi\nypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)",
"_____no_output_____"
],
[
"# the mean of the posterior\nypa_diff.mean()",
"_____no_output_____"
],
[
"print('low ci:', ypa_diff_ci_low, '\\nhigh ci:', ypa_diff_ci_hi)\n",
"low ci: 0.021046380362182315 \nhigh ci: 0.04576424973256987\n"
],
[
"ax = sns.distplot(ypa_diff)\nax.plot([ypa_diff_ci_low, ypa_diff_ci_hi], [0, 0], linewidth=10, c='k', marker='o', \n label='95% HDI')\nax.set(xlabel='Journey Length', ylabel='Density', \n title='The difference between B\\'s and A\\'s mean Journey Length')\nsns.despine()\nlegend = plt.legend(frameon=True)\nframe = legend.get_frame()\nframe.set_facecolor('white')\nplt.show();",
"_____no_output_____"
]
],
[
[
"We can actually calculate the probability that B's mean Journey Length was greater than A's mean Journey Length by measuring the proportion of values greater than 0 in the above distribution.",
"_____no_output_____"
]
],
[
[
"# We count the number of values greater than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# greater than 0, could act a bit like a p-value\n(ypa_diff > 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
],
[
"# We count the number of values less than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# less than 0, could act a bit like a p-value\n(ypa_diff < 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
]
],
[
[
"# Some other analysis\n\nSome of these results raised more questions, so here's some analysis (with metrics that weren't defined before looking at the other results, so not sure they are statistically valid, but may be interesting nevertheless)",
"_____no_output_____"
],
[
"Perhaps journey length is increasing because we're seeing fewer bouncers (journey length = 1) because they are seeing a relevant link on their first page instead of giving up",
"_____no_output_____"
],
[
"## Proportion of journeys that are length 1",
"_____no_output_____"
]
],
[
[
"def is_one(x):\n \"\"\"Compute whether a journey's length is 1.\"\"\"\n return x == 1",
"_____no_output_____"
],
[
"df['journey_length_1'] = df['Page_List_Length'].progress_apply(is_one)",
"100%|██████████| 772387/772387 [00:00<00:00, 846978.07it/s]\n"
]
],
[
[
"#### Statistical significance",
"_____no_output_____"
]
],
[
[
"is_length_1 = analysis.z_prop(df, 'journey_length_1', VARIANT_DICT)\nis_length_1",
"_____no_output_____"
],
[
"is_length_1['p-value'] < alpha",
"_____no_output_____"
]
],
[
[
"#### Practical significance",
"_____no_output_____"
]
],
[
[
"# Due to multiple testing we used the Bonferroni correction for alpha\nci_low,ci_upp = analysis.zconf_interval_two_samples(is_length_1['x_a'], is_length_1['n_a'],\n is_length_1['x_b'], is_length_1['n_b'], alpha = alpha)\nprint(' difference in proportions = {0:.2f}%'.format(100*(is_length_1['p_b']-is_length_1['p_a'])))\nprint(' % relative change in proportions = {0:.2f}%'.format(100*((is_length_1['p_b']-is_length_1['p_a'])/is_length_1['p_a'])))\nprint(' 95% Confidence Interval = ( {0:.2f}% , {1:.2f}% )'\n .format(100*ci_low, 100*ci_upp))",
" difference in proportions = -0.31%\n % relative change in proportions = -0.63%\n 95% Confidence Interval = ( -0.49% , -0.13% )\n"
]
],
[
[
"## Average journey length where length > 1",
"_____no_output_____"
]
],
[
[
"# for reproducibility, set the seed within this context\na_bootstrap_gt_1, b_bootstrap_gt_1 = analysis.bayesian_bootstrap_analysis(df[df['journey_length_1'] == False], col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)\n# a_bootstrap_short_gt_1, b_bootstrap_short_gt_1 = analysis.bayesian_bootstrap_analysis(df_short, col_name='Page_List_Length', boot_reps=boot_reps, seed = seed, variant_dict=VARIANT_DICT)",
"_____no_output_____"
],
[
"np.array(a_bootstrap_gt_1).mean()",
"_____no_output_____"
],
[
"np.array(b_bootstrap_gt_1).mean()",
"_____no_output_____"
],
[
"print(\"There's a relative change in page length of {0:.2f}% from A to B\".format((np.array(b_bootstrap_gt_1).mean()-np.array(a_bootstrap_gt_1).mean())/np.array(a_bootstrap_gt_1).mean()*100))",
"There's a relative change in page length of 0.85% from A to B\n"
],
[
"# calculate the posterior for the difference between A's and B's YPA\nypa_diff = np.array(b_bootstrap_gt_1) - np.array(a_bootstrap_gt_1)\n# get the hdi\nypa_diff_ci_low, ypa_diff_ci_hi = bb.highest_density_interval(ypa_diff)",
"_____no_output_____"
],
[
"print('low ci:', ypa_diff_ci_low, '\\nhigh ci:', ypa_diff_ci_hi)\n",
"low ci: 0.020260235948303063 \nhigh ci: 0.06327940799389875\n"
],
[
"# We count the number of values greater than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# greater than 0, could act a bit like a p-value\n(ypa_diff > 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
],
[
"# We count the number of values less than 0 and divide by the total number\n# of observations\n# which returns us the proportion of values in the distribution that are\n# less than 0, could act a bit like a p-value\n(ypa_diff < 0).sum() / ypa_diff.shape[0]",
"_____no_output_____"
]
],
[
[
"LLR increases journey lengths across the board",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79be07e581dd25f8d35b2bdc231020187a605fe | 772,796 | ipynb | Jupyter Notebook | module05_mnist_conv.ipynb | YUMVOLKOVA/Neural_Networks_and_CV | 1efd26a25451812dba3f5e4148330a68a485f920 | [
"MIT"
] | null | null | null | module05_mnist_conv.ipynb | YUMVOLKOVA/Neural_Networks_and_CV | 1efd26a25451812dba3f5e4148330a68a485f920 | [
"MIT"
] | null | null | null | module05_mnist_conv.ipynb | YUMVOLKOVA/Neural_Networks_and_CV | 1efd26a25451812dba3f5e4148330a68a485f920 | [
"MIT"
] | null | null | null | 315.813649 | 661,318 | 0.905987 | [
[
[
"# LeNet",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"import torch\nimport random\nimport numpy as np\n\nrandom.seed(0)\nnp.random.seed(0)\ntorch.manual_seed(0)\ntorch.cuda.manual_seed(0)\ntorch.backends.cudnn.deterministic = True",
"_____no_output_____"
],
[
"import torchvision.datasets",
"_____no_output_____"
],
[
"MNIST_train = torchvision.datasets.MNIST('./', download=True, train=True)\nMNIST_test = torchvision.datasets.MNIST('./', download=True, train=False)\n",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ./MNIST/raw/train-images-idx3-ubyte.gz\n"
],
[
"X_train = MNIST_train.train_data\ny_train = MNIST_train.train_labels\nX_test = MNIST_test.test_data\ny_test = MNIST_test.test_labels",
"/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:62: UserWarning: train_data has been renamed data\n warnings.warn(\"train_data has been renamed data\")\n/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:52: UserWarning: train_labels has been renamed targets\n warnings.warn(\"train_labels has been renamed targets\")\n/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:67: UserWarning: test_data has been renamed data\n warnings.warn(\"test_data has been renamed data\")\n/usr/local/lib/python3.7/dist-packages/torchvision/datasets/mnist.py:57: UserWarning: test_labels has been renamed targets\n warnings.warn(\"test_labels has been renamed targets\")\n"
],
[
"X_train",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"len(y_train), len(y_test)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nplt.imshow(X_train[0, :, :])\nplt.show()\nprint(y_train[0])",
"_____no_output_____"
]
],
[
[
"хотим передавать картинку, как трехмерный тензор",
"_____no_output_____"
]
],
[
[
"X_train = X_train.unsqueeze(1).float()\nX_test = X_test.unsqueeze(1).float()",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"X_train",
"_____no_output_____"
],
[
"class LeNet5(torch.nn.Module):\n def __init__(self):\n super(LeNet5, self).__init__()\n \n self.conv1 = torch.nn.Conv2d(\n in_channels=1, out_channels=6, kernel_size=5, padding=2) # у нас 28 на 28, чтобы не терять размерность картинки, делаем паддинг\n self.act1 = torch.nn.Tanh()\n self.pool1 = torch.nn.AvgPool2d(kernel_size=2, stride=2)\n \n self.conv2 = torch.nn.Conv2d(\n in_channels=6, out_channels=16, kernel_size=5, padding=0)\n self.act2 = torch.nn.Tanh()\n self.pool2 = torch.nn.AvgPool2d(kernel_size=2, stride=2)\n \n self.fc1 = torch.nn.Linear(5 * 5 * 16, 120)\n self.act3 = torch.nn.Tanh()\n \n self.fc2 = torch.nn.Linear(120, 84)\n self.act4 = torch.nn.Tanh()\n \n self.fc3 = torch.nn.Linear(84, 10)\n \n def forward(self, x):\n \n x = self.conv1(x)\n x = self.act1(x)\n x = self.pool1(x)\n \n x = self.conv2(x)\n x = self.act2(x)\n x = self.pool2(x)\n \n x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))\n\n x = self.fc1(x)\n x = self.act3(x)\n x = self.fc2(x)\n x = self.act4(x)\n x = self.fc3(x)\n \n return x\n \nlenet5 = LeNet5()",
"_____no_output_____"
]
],
[
[
"У PyTorch-тензоров есть функция view, которая наш тензор преобразует к нужной размерности. Первая размерность будет x.size[0] -- это размер батча, а дальше тензор будет одномерный, соответственно мы вот эти три размерности должны просто перемножить и получить вот здесь 400. ",
"_____no_output_____"
]
],
[
[
"device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nlenet5 = lenet5.to(device)",
"_____no_output_____"
],
[
"loss = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(lenet5.parameters(), lr=1.0e-3)",
"_____no_output_____"
],
[
"batch_size = 100\n\ntest_accuracy_history = []\ntest_loss_history = []\n\nX_test = X_test.to(device)\ny_test = y_test.to(device)\n\nfor epoch in range(10000):\n order = np.random.permutation(len(X_train))\n for start_index in range(0, len(X_train), batch_size):\n optimizer.zero_grad()\n \n batch_indexes = order[start_index:start_index+batch_size]\n \n X_batch = X_train[batch_indexes].to(device)\n y_batch = y_train[batch_indexes].to(device)\n \n preds = lenet5.forward(X_batch) \n \n loss_value = loss(preds, y_batch)\n loss_value.backward()\n \n optimizer.step()\n \n test_preds = lenet5.forward(X_test)\n test_loss_history.append(loss(test_preds, y_test).data.cpu())\n \n accuracy = (test_preds.argmax(dim=1) == y_test).float().mean().data.cpu()\n test_accuracy_history.append(accuracy)\n \n print(accuracy)",
"tensor(0.9748)\ntensor(0.9845)\ntensor(0.9852)\ntensor(0.9868)\ntensor(0.9863)\ntensor(0.9875)\ntensor(0.9884)\ntensor(0.9877)\ntensor(0.9866)\ntensor(0.9864)\ntensor(0.9893)\ntensor(0.9885)\ntensor(0.9889)\ntensor(0.9886)\ntensor(0.9901)\ntensor(0.9879)\ntensor(0.9893)\ntensor(0.9896)\ntensor(0.9884)\ntensor(0.9895)\ntensor(0.9897)\ntensor(0.9889)\ntensor(0.9883)\ntensor(0.9871)\ntensor(0.9881)\ntensor(0.9862)\ntensor(0.9879)\ntensor(0.9897)\ntensor(0.9878)\ntensor(0.9889)\ntensor(0.9886)\ntensor(0.9896)\ntensor(0.9886)\ntensor(0.9888)\ntensor(0.9890)\ntensor(0.9906)\ntensor(0.9905)\ntensor(0.9873)\ntensor(0.9889)\ntensor(0.9882)\ntensor(0.9879)\ntensor(0.9895)\ntensor(0.9883)\ntensor(0.9877)\ntensor(0.9876)\ntensor(0.9885)\ntensor(0.9899)\ntensor(0.9886)\ntensor(0.9879)\ntensor(0.9892)\ntensor(0.9889)\ntensor(0.9880)\ntensor(0.9883)\ntensor(0.9881)\ntensor(0.9863)\ntensor(0.9883)\ntensor(0.9888)\ntensor(0.9894)\ntensor(0.9888)\ntensor(0.9894)\ntensor(0.9892)\ntensor(0.9896)\ntensor(0.9899)\ntensor(0.9898)\ntensor(0.9902)\ntensor(0.9901)\ntensor(0.9902)\n"
],
[
"lenet5.forward(X_test)",
"_____no_output_____"
],
[
"plt.plot(test_accuracy_history);\n# plt.plot(test_loss_history);",
"_____no_output_____"
]
],
[
[
"Задача\n",
"_____no_output_____"
]
],
[
[
"import torch\n\nN = 4\nC = 3\nC_out = 10\nH = 8\nW = 16\n\nx = torch.ones((N, C, H, W))\nx.shape",
"_____no_output_____"
],
[
"# torch.Size([4, 10, 8, 16])\nout1 = torch.nn.Conv2d(C, C_out, kernel_size=(3, 3), padding=1)(x)\nprint(out1.shape) # для самопроверки",
"torch.Size([4, 10, 8, 16])\n"
],
[
"# torch.Size([4, 10, 8, 16])\nout2 = torch.nn.Conv2d(C, C_out, kernel_size=(5, 5), padding=2)(x)\nprint(out2.shape) # для самопроверки",
"torch.Size([4, 10, 8, 16])\n"
],
[
"# torch.Size([4, 10, 8, 16])\nout3 = torch.nn.Conv2d(C, C_out, kernel_size=(7, 7), padding=3)(x)\nprint(out3.shape) # для самопроверки",
"torch.Size([4, 10, 8, 16])\n"
],
[
"# torch.Size([4, 10, 8, 16])\nout4 = torch.nn.Conv2d(C, C_out, kernel_size=(9, 9), padding=4)(x)\nprint(out4.shape) # для самопроверки",
"torch.Size([4, 10, 8, 16])\n"
],
[
"# torch.Size([4, 10, 8, 16])\nout5 = torch.nn.Conv2d(C, C_out, kernel_size=(3, 5), padding=(1,2))(x)\nprint(out5.shape) # для самопроверки",
"torch.Size([4, 10, 8, 16])\n"
],
[
"# torch.Size([4, 10, 22, 30])\nout6 = torch.nn.Conv2d(C, C_out, kernel_size=(3, 3), padding=(8,8))(x)\nprint(out6.shape) # для самопроверки",
"torch.Size([4, 10, 22, 30])\n"
],
[
"# torch.Size([4, 10, 7, 15])\nout7 = torch.nn.Conv2d(C, C_out, kernel_size=(4, 4), padding=1)(x)\nprint(out7.shape) # для самопроверки",
"torch.Size([4, 10, 7, 15])\n"
],
[
"# torch.Size([4, 10, 9, 17])\nout8 = torch.nn.Conv2d(C, C_out, kernel_size=(2, 2), padding=1)(x)\nprint(out8.shape) # для самопроверки",
"torch.Size([4, 10, 9, 17])\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79be5e7d2f92863b5bc511cef8a58d5d4bc80d2 | 14,616 | ipynb | Jupyter Notebook | example/preferred_image_shortest_demo.ipynb | kencan7749/pytorch_cnn_preferred | 4c0ceba853a1c37143d42dfd0e314091be131f4d | [
"MIT"
] | 13 | 2019-03-16T14:47:31.000Z | 2020-11-17T16:49:16.000Z | example/preferred_image_shortest_demo.ipynb | kencan7749/pytorch_cnn_preferred | 4c0ceba853a1c37143d42dfd0e314091be131f4d | [
"MIT"
] | 3 | 2019-05-07T08:04:41.000Z | 2019-08-29T02:25:59.000Z | example/preferred_image_shortest_demo.ipynb | kencan7749/pytorch_cnn_preferred | 4c0ceba853a1c37143d42dfd0e314091be131f4d | [
"MIT"
] | 1 | 2019-09-15T07:58:50.000Z | 2019-09-15T07:58:50.000Z | 38.26178 | 96 | 0.530378 | [
[
[
"# module import\nimport os\nimport sys\n\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torchvision\n \nsys.path.append('../cnn_preferred')\nfrom utils import get_cnn_features, normalise_img\nfrom activation_maximization import generate_preferred",
"_____no_output_____"
],
[
"# step1: load network\nnet = torchvision.models.alexnet(pretrained=True)\nnet.eval()",
"_____no_output_____"
],
[
"# before step2, visualize the network to understand network archtecture\nprint(net)",
"AlexNet(\n (features): Sequential(\n (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n (1): ReLU(inplace)\n (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n (4): ReLU(inplace)\n (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (7): ReLU(inplace)\n (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (9): ReLU(inplace)\n (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (11): ReLU(inplace)\n (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n (classifier): Sequential(\n (0): Dropout(p=0.5)\n (1): Linear(in_features=9216, out_features=4096, bias=True)\n (2): ReLU(inplace)\n (3): Dropout(p=0.5)\n (4): Linear(in_features=4096, out_features=4096, bias=True)\n (5): ReLU(inplace)\n (6): Linear(in_features=4096, out_features=1000, bias=True)\n )\n)\n"
],
[
"# step2: set target layer and the channel to visualise\n# Set target layer like...\n## target_layer = \"features[8]\"\ntarget_layer = \"classifier[6]\"\n## target_layer = \"features[10]\"\n\n#target layer setting to run by \"exec\" function\ntarget_layer_list = [target_layer]\n# set the target channel\ntarget_channel = 11",
"_____no_output_____"
],
[
"# step3: perform activation maximization\npreferred_stim = generate_preferred(net, target_layer_list, target_channel)",
"iter=1; mean(abs(feat))=0.539225;\niter=2; mean(abs(feat))=0.341679;\niter=3; mean(abs(feat))=3.61749;\niter=4; mean(abs(feat))=4.76636;\niter=5; mean(abs(feat))=5.88315;\niter=6; mean(abs(feat))=6.51545;\niter=7; mean(abs(feat))=9.82365;\niter=8; mean(abs(feat))=8.21605;\niter=9; mean(abs(feat))=9.93479;\niter=10; mean(abs(feat))=12.8297;\niter=11; mean(abs(feat))=9.76009;\niter=12; mean(abs(feat))=12.636;\niter=13; mean(abs(feat))=11.5907;\niter=14; mean(abs(feat))=15.3642;\niter=15; mean(abs(feat))=12.5635;\niter=16; mean(abs(feat))=16.3456;\niter=17; mean(abs(feat))=19.6163;\niter=18; mean(abs(feat))=21.6384;\niter=19; mean(abs(feat))=23.9717;\niter=20; mean(abs(feat))=18.7255;\niter=21; mean(abs(feat))=15.9115;\niter=22; mean(abs(feat))=14.9073;\niter=23; mean(abs(feat))=16.7444;\niter=24; mean(abs(feat))=19.3635;\niter=25; mean(abs(feat))=17.7871;\niter=26; mean(abs(feat))=19.2707;\niter=27; mean(abs(feat))=17.1797;\niter=28; mean(abs(feat))=24.5274;\niter=29; mean(abs(feat))=20.3421;\niter=30; mean(abs(feat))=23.0465;\niter=31; mean(abs(feat))=26.2575;\niter=32; mean(abs(feat))=24.6852;\niter=33; mean(abs(feat))=24.2485;\niter=34; mean(abs(feat))=25.1603;\niter=35; mean(abs(feat))=22.0328;\niter=36; mean(abs(feat))=30.9242;\niter=37; mean(abs(feat))=20.9158;\niter=38; mean(abs(feat))=24.6476;\niter=39; mean(abs(feat))=27.8181;\niter=40; mean(abs(feat))=25.508;\niter=41; mean(abs(feat))=24.1948;\niter=42; mean(abs(feat))=28.1837;\niter=43; mean(abs(feat))=30.7647;\niter=44; mean(abs(feat))=26.2534;\niter=45; mean(abs(feat))=35.7242;\niter=46; mean(abs(feat))=28.9765;\niter=47; mean(abs(feat))=24.4596;\niter=48; mean(abs(feat))=34.0609;\niter=49; mean(abs(feat))=29.0028;\niter=50; mean(abs(feat))=32.2237;\niter=51; mean(abs(feat))=36.3249;\niter=52; mean(abs(feat))=38.1331;\niter=53; mean(abs(feat))=38.0622;\niter=54; mean(abs(feat))=41.9875;\niter=55; mean(abs(feat))=33.6073;\niter=56; mean(abs(feat))=33.5633;\niter=57; mean(abs(feat))=47.7839;\niter=58; 
mean(abs(feat))=31.1749;\niter=59; mean(abs(feat))=34.6397;\niter=60; mean(abs(feat))=34.7746;\niter=61; mean(abs(feat))=38.0107;\niter=62; mean(abs(feat))=42.1785;\niter=63; mean(abs(feat))=37.6415;\niter=64; mean(abs(feat))=44.4138;\niter=65; mean(abs(feat))=46.2392;\niter=66; mean(abs(feat))=43.1839;\niter=67; mean(abs(feat))=36.5085;\niter=68; mean(abs(feat))=38.5373;\niter=69; mean(abs(feat))=38.277;\niter=70; mean(abs(feat))=56.6722;\niter=71; mean(abs(feat))=51.3832;\niter=72; mean(abs(feat))=65.2464;\niter=73; mean(abs(feat))=56.3342;\niter=74; mean(abs(feat))=68.1945;\niter=75; mean(abs(feat))=59.8834;\niter=76; mean(abs(feat))=45.5493;\niter=77; mean(abs(feat))=59.574;\niter=78; mean(abs(feat))=65.3569;\niter=79; mean(abs(feat))=49.7304;\niter=80; mean(abs(feat))=64.4652;\niter=81; mean(abs(feat))=55.9058;\niter=82; mean(abs(feat))=62.7726;\niter=83; mean(abs(feat))=64.2135;\niter=84; mean(abs(feat))=74.0669;\niter=85; mean(abs(feat))=54.6759;\niter=86; mean(abs(feat))=40.7471;\niter=87; mean(abs(feat))=63.9843;\niter=88; mean(abs(feat))=82.7287;\niter=89; mean(abs(feat))=78.3161;\niter=90; mean(abs(feat))=93.2338;\niter=91; mean(abs(feat))=66.7787;\niter=92; mean(abs(feat))=65.4058;\niter=93; mean(abs(feat))=79.7188;\niter=94; mean(abs(feat))=64.2377;\niter=95; mean(abs(feat))=97.7929;\niter=96; mean(abs(feat))=73.3485;\niter=97; mean(abs(feat))=95.6696;\niter=98; mean(abs(feat))=48.4622;\niter=99; mean(abs(feat))=68.344;\niter=100; mean(abs(feat))=87.6209;\niter=101; mean(abs(feat))=71.5965;\niter=102; mean(abs(feat))=91.2861;\niter=103; mean(abs(feat))=59.3372;\niter=104; mean(abs(feat))=86.3558;\niter=105; mean(abs(feat))=61.1659;\niter=106; mean(abs(feat))=84.0086;\niter=107; mean(abs(feat))=93.9646;\niter=108; mean(abs(feat))=64.9454;\niter=109; mean(abs(feat))=97.9516;\niter=110; mean(abs(feat))=81.2423;\niter=111; mean(abs(feat))=76.5289;\niter=112; mean(abs(feat))=93.1628;\niter=113; mean(abs(feat))=87.5823;\niter=114; 
mean(abs(feat))=113.439;\niter=115; mean(abs(feat))=94.6882;\niter=116; mean(abs(feat))=74.3647;\niter=117; mean(abs(feat))=60.0826;\niter=118; mean(abs(feat))=59.1125;\niter=119; mean(abs(feat))=134.634;\niter=120; mean(abs(feat))=92.9296;\niter=121; mean(abs(feat))=99.781;\niter=122; mean(abs(feat))=127.305;\niter=123; mean(abs(feat))=85.6475;\niter=124; mean(abs(feat))=139.675;\niter=125; mean(abs(feat))=123.641;\niter=126; mean(abs(feat))=107.178;\niter=127; mean(abs(feat))=126.222;\niter=128; mean(abs(feat))=125.965;\niter=129; mean(abs(feat))=152.577;\niter=130; mean(abs(feat))=181.65;\niter=131; mean(abs(feat))=175.097;\niter=132; mean(abs(feat))=156.308;\niter=133; mean(abs(feat))=137.856;\niter=134; mean(abs(feat))=161.992;\niter=135; mean(abs(feat))=120.58;\niter=136; mean(abs(feat))=176.938;\niter=137; mean(abs(feat))=159.244;\niter=138; mean(abs(feat))=179.768;\niter=139; mean(abs(feat))=120.922;\niter=140; mean(abs(feat))=128.838;\niter=141; mean(abs(feat))=190.335;\niter=142; mean(abs(feat))=145.362;\niter=143; mean(abs(feat))=295.52;\niter=144; mean(abs(feat))=138.616;\niter=145; mean(abs(feat))=223.137;\niter=146; mean(abs(feat))=146.939;\niter=147; mean(abs(feat))=229.299;\niter=148; mean(abs(feat))=179.729;\niter=149; mean(abs(feat))=240.819;\niter=150; mean(abs(feat))=105.953;\niter=151; mean(abs(feat))=131.212;\niter=152; mean(abs(feat))=206.036;\niter=153; mean(abs(feat))=216.01;\niter=154; mean(abs(feat))=176.403;\niter=155; mean(abs(feat))=203.01;\niter=156; mean(abs(feat))=197.744;\niter=157; mean(abs(feat))=179.721;\niter=158; mean(abs(feat))=130.866;\niter=159; mean(abs(feat))=191.884;\niter=160; mean(abs(feat))=245.42;\niter=161; mean(abs(feat))=313.384;\niter=162; mean(abs(feat))=232.185;\niter=163; mean(abs(feat))=311.105;\niter=164; mean(abs(feat))=190.608;\niter=165; mean(abs(feat))=248.92;\niter=166; mean(abs(feat))=256.709;\niter=167; mean(abs(feat))=332.149;\niter=168; mean(abs(feat))=311.72;\niter=169; 
mean(abs(feat))=268.515;\niter=170; mean(abs(feat))=222.385;\niter=171; mean(abs(feat))=235.843;\niter=172; mean(abs(feat))=335.877;\niter=173; mean(abs(feat))=283.496;\niter=174; mean(abs(feat))=360.679;\niter=175; mean(abs(feat))=260.424;\niter=176; mean(abs(feat))=445.08;\niter=177; mean(abs(feat))=455.572;\niter=178; mean(abs(feat))=396.853;\niter=179; mean(abs(feat))=300.062;\niter=180; mean(abs(feat))=355.433;\niter=181; mean(abs(feat))=380.909;\niter=182; mean(abs(feat))=405.037;\niter=183; mean(abs(feat))=462.48;\niter=184; mean(abs(feat))=493.199;\niter=185; mean(abs(feat))=398.66;\niter=186; mean(abs(feat))=369.481;\niter=187; mean(abs(feat))=429.186;\niter=188; mean(abs(feat))=488.923;\niter=189; mean(abs(feat))=673.11;\niter=190; mean(abs(feat))=623.574;\niter=191; mean(abs(feat))=431.609;\niter=192; mean(abs(feat))=671.618;\niter=193; mean(abs(feat))=663.357;\niter=194; mean(abs(feat))=617.775;\niter=195; mean(abs(feat))=554.124;\niter=196; mean(abs(feat))=580.797;\niter=197; mean(abs(feat))=693.311;\niter=198; mean(abs(feat))=609.245;\niter=199; mean(abs(feat))=879.838;\niter=200; mean(abs(feat))=749.458;\n"
],
[
"# step4: save the preferred image\nsave_name = 'demo.jpg'\nPIL.Image.fromarray(normalise_img(preferred_stim)).save(save_name)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79beb31876bb6db2c4d62594a9d558938909ddc | 98,507 | ipynb | Jupyter Notebook | XGBoost-RFECV-RoF-St.Johns.ipynb | SadafGharaati/Important-factors | c159e1c783a10af7ba92ff0828542349282609b4 | [
"Unlicense"
] | 1 | 2022-03-23T02:53:37.000Z | 2022-03-23T02:53:37.000Z | XGBoost-RFECV-RoF-St.Johns.ipynb | SadafGharaati/Important-factors | c159e1c783a10af7ba92ff0828542349282609b4 | [
"Unlicense"
] | null | null | null | XGBoost-RFECV-RoF-St.Johns.ipynb | SadafGharaati/Important-factors | c159e1c783a10af7ba92ff0828542349282609b4 | [
"Unlicense"
] | null | null | null | 89.470481 | 31,896 | 0.75952 | [
[
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.model_selection import train_test_split, GridSearchCV, KFold, RandomizedSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score,r2_score\nimport xgboost \nfrom xgboost import XGBRFRegressor, XGBRegressor\nfrom sklearn.metrics import make_scorer\nr2_score = make_scorer(r2_score)",
"_____no_output_____"
]
],
[
[
"# In this note book the following steps are taken:\n1. Find the best hyper parameters for estimator\n2. Find the most important features by tunned random forest\n3. Comapring r2 of the tuuned full model and model with selected features\n4. Furthur step is finding tuned model with selected features and comparing the hyper parameters",
"_____no_output_____"
]
],
[
[
"#import data\nData=pd.read_csv(\"St.Johns-Transfomed-Data.csv\")",
"_____no_output_____"
],
[
"X = Data.iloc[:,:-1]\ny = Data.iloc[:,-1]",
"_____no_output_____"
],
[
"#split test and training set. total number of data is 330 so the test size cannot be large\nnp.random.seed(60)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20,\n random_state = 1000)",
"_____no_output_____"
],
[
"regressors = {}\nregressors.update({\"XGBoost\": XGBRegressor(random_state=1000)})\nFEATURE_IMPORTANCE = {\"XGBoost\"}",
"_____no_output_____"
],
[
"#Define range of hyperparameters for estimator\nnp.random.seed(60)\nparameters = {}\nparameters.update({\"XGBoost\": { \n \"regressor__learning_rate\":[0.001,0.01,0.02,0.1,0.25,0.5,1],\n \"regressor__gamma\":[0.001,0.01,0.02,0.1,0.25,0.5,1],\n \"regressor__max_depth\" : [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],\n \"regressor__reg_alpha\":[0.001,0.01,0.02,0.1],\n \"regressor__reg_lambda\":[0.001,0.01,0.02,0.1],\n \"regressor__min_child_weight\":[0.001,0.01,0.02,0.1]}\n})",
"_____no_output_____"
],
[
"# Make correlation matrix\ncorr_matrix = X_train.corr(method = \"spearman\").abs()\n\n# Draw the heatmap\nsns.set(font_scale = 1.0)\nf, ax = plt.subplots(figsize=(11, 9))\nsns.heatmap(corr_matrix, cmap= \"YlGnBu\", square=True, ax = ax)\nf.tight_layout()\nplt.savefig(\"correlation_matrix.png\", dpi = 1080)\n\n# Select upper triangle of matrix\nupper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n# Find index of feature columns with correlation greater than 0.8\nto_drop = [column for column in upper.columns if any(upper[column] > 0.8)]\n\n# Drop features\nX_train = X_train.drop(to_drop, axis = 1)\nX_test = X_test.drop(to_drop, axis = 1)",
"_____no_output_____"
],
[
"X_train",
"_____no_output_____"
],
[
"FEATURE_IMPORTANCE = {\"XGBoost\"}\nselected_regressor = \"XGBoost\"\nregressor = regressors[selected_regressor]",
"_____no_output_____"
],
[
"results = {}\nfor regressor_label, regressor in regressors.items():\n # Print message to user\n print(f\"Now tuning {regressor_label}.\")",
"Now tuning XGBoost.\n"
],
[
"scaler = StandardScaler()\nsteps = [(\"scaler\", scaler), (\"regressor\", regressor)]\npipeline = Pipeline(steps = steps)",
"_____no_output_____"
],
[
"#Define parameters that we want to use in gridsearch cv\nparam_grid = parameters[selected_regressor]",
"_____no_output_____"
],
[
"# Initialize GridSearch object for estimator\ngscv = RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs= -1, verbose = 1, scoring = r2_score, n_iter=20)",
"_____no_output_____"
],
[
"# Fit gscv (Tunes estimator)\nprint(f\"Now tuning {selected_regressor}. Go grab a beer or something.\")\ngscv.fit(X_train, np.ravel(y_train)) ",
"Now tuning XGBoost. Go grab a beer or something.\nFitting 3 folds for each of 20 candidates, totalling 60 fits\n"
],
[
"#Getting the best hyperparameters\nbest_params = gscv.best_params_\nbest_params",
"_____no_output_____"
],
[
"#Getting the best score of model\nbest_score = gscv.best_score_\nbest_score",
"_____no_output_____"
],
[
"#Check overfitting of the estimator\nfrom sklearn.model_selection import cross_val_score\nmod = XGBRegressor(gamma= 0.001,\n learning_rate= 0.5,\n max_depth=3, \n min_child_weight= 0.001, \n reg_alpha=0.1,\n reg_lambda = 0.1 ,random_state=10000)\n\nscores_test = cross_val_score(mod, X_test, y_test, scoring='r2', cv=5)\n\nscores_test",
"_____no_output_____"
],
[
"tuned_params = {item[11:]: best_params[item] for item in best_params}\nregressor.set_params(**tuned_params)",
"_____no_output_____"
],
[
"#Find r2 of the model with all features (Model is tuned for all features)\nresults={}\nmodel=regressor.set_params(gamma= 0.001,\n learning_rate= 0.5,\n max_depth=3, \n min_child_weight= 0.001, \n reg_alpha=0.1,\n reg_lambda = 0.1 ,random_state=10000)\n\nmodel.fit(X_train,y_train)\ny_pred = model.predict(X_test)\nR2 = metrics.r2_score(y_test, y_pred)\nresults = {\"classifier\": model,\n \"Best Parameters\": best_params,\n \"Training r2\": best_score*100,\n \"Test r2\": R2*100}\nresults",
"_____no_output_____"
],
[
"# Select Features using RFECV\nclass PipelineRFE(Pipeline):\n # Source: https://ramhiser.com/post/2018-03-25-feature-selection-with-scikit-learn-pipeline/\n def fit(self, X, y=None, **fit_params):\n super(PipelineRFE, self).fit(X, y, **fit_params)\n self.feature_importances_ = self.steps[-1][-1].feature_importances_\n return self",
"_____no_output_____"
],
[
"steps = [(\"scaler\", scaler), (\"regressor\", regressor)]\npipe = PipelineRFE(steps = steps)\nnp.random.seed(60)\n\n# Initialize RFECV object\nfeature_selector = RFECV(pipe, cv = 5, step = 1, verbose = 1)\n\n# Fit RFECV\nfeature_selector.fit(X_train, np.ravel(y_train))\n\n# Get selected features\nfeature_names = X_train.columns\nselected_features = feature_names[feature_selector.support_].tolist()",
"Fitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\nFitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\nFitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\nFitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\nFitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\nFitting estimator with 7 features.\nFitting estimator with 6 features.\nFitting estimator with 5 features.\nFitting estimator with 4 features.\nFitting estimator with 3 features.\nFitting estimator with 2 features.\n"
],
[
"performance_curve = {\"Number of Features\": list(range(1, len(feature_names) + 1)),\n \"R2\": feature_selector.grid_scores_}\nperformance_curve = pd.DataFrame(performance_curve)\n\n# Performance vs Number of Features\n# Set graph style\nsns.set(font_scale = 1.75)\nsns.set_style({\"axes.facecolor\": \"1.0\", \"axes.edgecolor\": \"0.85\", \"grid.color\": \"0.85\",\n \"grid.linestyle\": \"-\", 'axes.labelcolor': '0.4', \"xtick.color\": \"0.4\",\n 'ytick.color': '0.4'})\ncolors = sns.color_palette(\"RdYlGn\", 20)\nline_color = colors[3]\nmarker_colors = colors[-1]\n\n# Plot\nf, ax = plt.subplots(figsize=(13, 6.5))\nsns.lineplot(x = \"Number of Features\", y = \"R2\", data = performance_curve,\n color = line_color, lw = 4, ax = ax)\nsns.regplot(x = performance_curve[\"Number of Features\"], y = performance_curve[\"R2\"],\n color = marker_colors, fit_reg = False, scatter_kws = {\"s\": 200}, ax = ax)\n\n# Axes limits\nplt.xlim(0.5, len(feature_names)+0.5)\nplt.ylim(0.60, 1)\n\n# Generate a bolded horizontal line at y = 0\nax.axhline(y = 0.625, color = 'black', linewidth = 1.3, alpha = .7)\n\n# Turn frame off\nax.set_frame_on(False)\n\n# Tight layout\nplt.tight_layout()",
"_____no_output_____"
],
[
"#Define new training and test set based based on selected features by RFECV\nX_train_rfecv = X_train[selected_features]\nX_test_rfecv= X_test[selected_features]",
"_____no_output_____"
],
[
"np.random.seed(60)\nregressor.fit(X_train_rfecv, np.ravel(y_train))",
"_____no_output_____"
],
[
"#Finding important features\nnp.random.seed(60)\nfeature_importance = pd.DataFrame(selected_features, columns = [\"Feature Label\"])\nfeature_importance[\"Feature Importance\"] = regressor.feature_importances_\nfeature_importance = feature_importance.sort_values(by=\"Feature Importance\", ascending=False)\nfeature_importance",
"_____no_output_____"
],
[
"# Initialize GridSearch object for model with selected features\nnp.random.seed(60)\ngscv = RandomizedSearchCV(pipeline, param_grid, cv = 3, n_jobs= -1, verbose = 1, scoring = r2_score, n_iter=20)",
"_____no_output_____"
],
[
"#Tuning random forest classifier with selected features \nnp.random.seed(60)\ngscv.fit(X_train_rfecv,y_train) ",
"Fitting 3 folds for each of 20 candidates, totalling 60 fits\n"
],
[
"#Getting the best parameters of model with selected features\nbest_params = gscv.best_params_\nbest_params",
"_____no_output_____"
],
[
"#Getting the score of model with selected features\nbest_score = gscv.best_score_\nbest_score",
"_____no_output_____"
],
[
"#Check overfitting of the tuned model with selected features \nfrom sklearn.model_selection import cross_val_score\nmod = XGBRegressor(gamma= 0.001,\n learning_rate= 0.5,\n max_depth=3, \n min_child_weight= 0.001, \n reg_alpha=0.1, \n reg_lambda = 0.1 ,random_state=10000)\n\nscores_test = cross_val_score(mod, X_test_rfecv, y_test, scoring='r2', cv=5)\n\nscores_test",
"_____no_output_____"
],
[
"results={}\nmodel=regressor.set_params(gamma= 0.001,\n learning_rate= 0.5,\n max_depth=3, \n min_child_weight= 0.001, \n reg_alpha=0.1, \n reg_lambda = 0.1 ,random_state=10000)\nmodel.fit(X_train_rfecv,y_train)\ny_pred = model.predict(X_test_rfecv)\nR2 = metrics.r2_score(y_test, y_pred)\nresults = {\"classifier\": model,\n \"Best Parameters\": best_params,\n \"Training r2\": best_score*100,\n \"Test r2\": R2*100}\nresults",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79bf085b3b2869686d32954508bbf5a0dd566d1 | 29,039 | ipynb | Jupyter Notebook | Building_and_submitting_search_queries_to_AGRIS.ipynb | herculespan/customNERforAgriEntities | 00351f345336b3b3c54eb9daa21e0c9b5d3f0962 | [
"MIT"
] | null | null | null | Building_and_submitting_search_queries_to_AGRIS.ipynb | herculespan/customNERforAgriEntities | 00351f345336b3b3c54eb9daa21e0c9b5d3f0962 | [
"MIT"
] | null | null | null | Building_and_submitting_search_queries_to_AGRIS.ipynb | herculespan/customNERforAgriEntities | 00351f345336b3b3c54eb9daa21e0c9b5d3f0962 | [
"MIT"
] | null | null | null | 35.674447 | 568 | 0.568167 | [
[
[
"<h2>Building and submitting search queries to AGRIS</h2>\n<p>This script is used with the aim to <b>submit a search query</b> to the (<a href = https://agris.fao.org/agris-search/biblio.action?>AGRIS database</a>) and <b>retrieve the list of the URLs</b> (or a subset of the returned URLs) directing to the <b>search results</b>. The <b>result URLs</b> that are obtained are <b>stored in a txt file</b> in order to be used for <b>scraping the AGRIS database</b> for relevant content (i.e., <b>abstracts</b> of publications available from the specific database) to be <b>used for text annotation-related purposes</b>.</p>",
"_____no_output_____"
],
[
"<p>The <b>first step</b> in the process of submitting a search query to the AGRIS database and receiving the result URLs is to <b>import the Python libraries and packages</b> that are <b>necessary</b> for the <b>execution of this task</b>.</p>",
"_____no_output_____"
]
],
[
[
"import requests\nfrom bs4 import BeautifulSoup",
"_____no_output_____"
]
],
[
[
"<p>The <code>findNumOfTokens</code> function is <b>defined</b> and <b>used</b> with the aim to <b>enable the retrieval of the number of the search results</b> returned from the <b>submission</b> of the <b>query</b> to the <b>AGRIS database</b> (by making use of the <b>search parameters</b> presented and explained below).</p>",
"_____no_output_____"
]
],
[
[
"def findNumOfTokens(string):\n numOfTokens = len(string.split())\n return numOfTokens",
"_____no_output_____"
]
],
[
[
"<p>When builing a search query to submit to the AGRIS database, there is a <b>list of search parameters</b> that <b>need to be configured</b>. In other words, these parameters need to be assigned the <b>values</b> that will be used for the <b>execution of the search task</b> and the <b>retrieval of the result URLs</b>. These parameters are the following: \n<ul><li> <b>subject</b> (i.e., the <b>subject of the results</b> to be identified and returned - what the text documents/abstracts to be eventually retrieved need to be about);</li>\n <li> <b>result type</b> (AGRIS allows to execute searches <b>in regard to a list of predefined types</b>; these types are \"<code>Publications</code>\" and \"<code>Databsets</code>\");</li>\n <li> <b>start year</b> (i.e., the <b>year from which results</b> for the search query should be <b>identified</b> and <b>returned</b>);</li>\n <li> <b> end year</b> (i.e., the <b>year till which results</b> for the search query should be <b>identified</b> and <b>returned</b>);</li>\n <li> <b>country name</b> (i.e., the <b>name of the country</b> that the <b>content of the resources</b> to be identifed and retrieved with the help of the search results <b>should relate to</b>); </li>\n <li> <b>language</b> (i.e., the <b>language of the content of the resources</b> made available from the search results that are identified and retrieved);</li>\n <li> <b>content type</b> (i.e., the <b>type of the content of the resources</b> -theses, journal papers, reports, etc.- made available from the search results that are identified and retrieved); </li>\n</ul>\nTo build the search query by taking account of the values provided to the search parameters listed above (i.e., the <b>configurable part of the search query</b>), we <b>define</b> and <b>use</b> the <code>buildConfigurableQueryStr</code> function.</p> \n<p>The <b>input</b> provided <b>to the function</b> are the <b>values of the search parameters</b>. 
In addition, the function <b>takes into consideration</b> the <b>number of tokens</b> included <b>in the search query</b> when \"<b>constructing</b>\" <b>the value</b> to be finally provided to the <code>subject</code> parameter.</p>",
"_____no_output_____"
]
],
[
[
"def buildConfigurableQueryStr (subject, resultType, startYear, endYear, countryName, language, contentType):\n \n numOfTokensInSubj = findNumOfTokens(subject)\n if numOfTokensInSubj == 1:\n filterString = \"filterString=%2Bsubject%3A%28\" + subject + \"%29\"\n else:\n filterString = \"\"\n for subjectToken in subject.split():\n filterString = filterString + \"filterString=%2Bsubject%3A%28\" + subjectToken + \"%29\"\n \n typeresultsField = \"typeresultsField=\" + resultType\n \n fromDate = \"fromDate=\" + str(startYear)\n toDate = \"toDate=\" + str(endYear)\n \n if countryName == \"0\":\n country = \"country=\" + str(countryName)\n else:\n country = \"country=\" + countryName \n \n if language == \"0\":\n lang = \"lang=\" + str(0)\n else:\n lang = \"lang=\" + language\n \n if contentType == \"0\":\n typeToAdd = \"typeToAdd=\" + str(0)\n else:\n typeToAdd = \"typeToAdd=\" + contentType\n\n configurableQueryStr = filterString + \"&\" + typeresultsField + \"&\" + fromDate + \"&\" + toDate + \"&\" + country + \"&\" + lang + \"&\" + typeToAdd\n \n return configurableQueryStr",
"_____no_output_____"
]
],
[
[
"<p>Apart from the configurable part of the search query to be submitted to the AGRIS database, there is also a <b>part of the search query</b> consisting of <b>parameters that receive default values</b> (more specifically, most of those parameters receive <b>no values at all</b>!).</p> \n<p>This part of the search query can be named as the <b>default part of the search query</b>. The <b>parameters</b> receiving no values at all or specific values by default are: (i) <code>agrovocString</code>; (ii) <code>agrovocToRemove</code>; (iii) <code>advQuery</code>; (iv) <code>centerString</code>; (v) <code>centerToRemove</code>; (vi) <code>filterToRemove</code>; (vii) <code>typeString</code>; (viii) <code>typeToRemove</code>; and (ix) <code>filterQuery</code>.</p>",
"_____no_output_____"
]
],
[
[
"def AGRISqueryBuilder ():\n queryStr = \"\"\n \n # list of query parameters receiving no values\n paramsWithNullValues = [\"agrovocString=\", \"agrovocToRemove=\", \"advQuery=\", \"centerString=\", \"centerToRemove=\", \n \"filterToRemove=\", \"typeString=\", \"typeToRemove=\", \"filterQuery=\"]\n\n # concatenating the parameters with no values to start assemblying the AGRIS query string\n for param in paramsWithNullValues:\n queryStr = queryStr + param + \"&\"\n \n # list of query parameters with default values, such as onlyFullText, enableField and aggregatorField\n # onlyFullText = false --> access resources that may not provide access to a full-text version!\n # enableField = Disable --> multi-lingual search is disabled!\n # aggregatorField = Disable --> include records from aggregators!\n paramsWithDefaultValues = [\"onlyFullText=false\", \"operator=Required\", \"field=0\", \"enableField=Disable\", \n \"aggregatorField=Disable\"]\n \n for param in paramsWithDefaultValues:\n queryStr = queryStr + param + \"&\"\n \n return queryStr",
"_____no_output_____"
]
],
[
[
"<p>By calling the <code>AGRISqueryBuilder</code> function, we are able to <b>create the first part of the search query</b> that will be submitted to the AGRIS database (i.e., the <b>default part of the search query</b> containing the search parameters that receive default values or no value at all).</p>",
"_____no_output_____"
]
],
[
[
"queryStr_1 = AGRISqueryBuilder()",
"_____no_output_____"
]
],
[
[
"<h4>Assignment of values to the search parameters to be used for creating the configurable part of the serch query</h4>",
"_____no_output_____"
],
[
"<p><b>Step 1</b>: Subject of the search query.</p>",
"_____no_output_____"
]
],
[
[
"subject = input(\"Type in the subject of your search in AGRIS: \")",
"Type in the subject of your search in AGRIS: agriculture\n"
]
],
[
[
"<p><b>Step 2</b>: <b>Type of the results</b> to be retrieved (namely: \"<b>Publications</b>\", \"<b>Datasets</b>\" or both).</p>",
"_____no_output_____"
]
],
[
[
"resultType = input(\"Type in the type of results (i.e., 'Publications', 'Datasets', 'Both') you are interested in: \")",
"Type in the type of results (i.e., 'Publications', 'Datasets', 'Both') you are interested in: Publications\n"
]
],
[
[
"<p><b>Step 3</b>: <b>Starting year</b> from which results should become available.</p>",
"_____no_output_____"
]
],
[
[
" startYear = input(\"Find resources that have become available from this year and on: \")",
"Find resources that have become available from this year and on: 2000\n"
]
],
[
[
"<p><b>Step 4</b>: <b>Year</b> till which results should become available (i.e., <b>end year</b>).</p>",
"_____no_output_____"
]
],
[
[
"endYear = input(\"Find resources that have become available up until this year: \")",
"Find resources that have become available up until this year: 2021\n"
]
],
[
[
"<p><b>Step 5</b>: The <b>name of the country</b> that the <b>content of the resources</b> to be retrieved <b>should relate to</b>.</p>",
"_____no_output_____"
]
],
[
[
"countryName = input(\"Type in the name of the country the resource's content relates to. If not relevant, provide 0 as a value: \")",
"Type in the name of the country the resource's content relates to. If not relevant, provide 0 as a value: 0\n"
]
],
[
[
"<p><b>Step 6</b>: The <b>language of the content</b> that will become available from the resources to be retreved.</p>",
"_____no_output_____"
]
],
[
[
"language = input(\"Type in the language in which content should be made available. In the case of no particular preference provide 0 as a value: \")",
"Type in the language in which content should be made available. In the case of no particular preference provide 0 as a value: English\n"
]
],
[
[
"<p><b>Step 7</b>: The <b>type of the content</b> to be retrived (pertinent to the \"<b>Publications</b>\" result type - potential values are: theses, journal papers, reports, etc.).</p>",
"_____no_output_____"
]
],
[
[
"contentType = input(\"Provide the type of content you are interested in (applies only to Publications). If not relevant, provide 0 as a value: \")",
"Provide the type of content you are interested in (applies only to Publications). If not relevant, provide 0 as a value: 0\n"
]
],
[
[
"<p>By calling the <code>buildConfigurableQueryStr</code> function, we are able to <b>create the second part of the search query</b> that will be submitted to the AGRIS database (i.e., the <b>configurable part of the search query</b> containing the values provided to the search parameters as part of the steps executed above).</p>",
"_____no_output_____"
]
],
[
[
"queryStr_2 = buildConfigurableQueryStr(subject, resultType, startYear, endYear, countryName, language, contentType)",
"_____no_output_____"
]
],
[
[
"<p>The <b>search query</b> (i.e., the <code>baseQueryStr</code>) is <b>built</b> by <b>concatenating</b> the <b>default</b> (i.e., <code>queryStr_1</code>) and the <b>configurable part</b> (<code>queryStr_2</code>) of it.</p>",
"_____no_output_____"
]
],
[
[
"baseQueryStr = queryStr_1 + queryStr_2",
"_____no_output_____"
]
],
[
[
"<p><b>Display</b> the <b>search query</b> (i.e, the <code>baseQueryStr</code>) to be <b>finally submitted</b> to the AGRIS database.</p>",
"_____no_output_____"
]
],
[
[
"baseQueryStr",
"_____no_output_____"
]
],
[
[
"<p>The constructed <b>search query gets submitted</b> to the AGRIS database.</p>",
"_____no_output_____"
]
],
[
[
"response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr)",
"_____no_output_____"
]
],
[
[
"<p><b>Printing out</b> the <b><code>status code</code> of the response</b> provided to the <b>query that has been submitted</b> in order to <b>receive feedback</b> on whether the <b>query submission</b> has <b>been successful or not</b> (a <b>response value equal to <code>200</code></b> reveals a <b>successful</b> query submission attempt!).</p>",
"_____no_output_____"
]
],
[
[
"response.status_code",
"_____no_output_____"
]
],
[
[
"<h4>Parsing content</h4> \n<p> The <b>page of the AGRIS database</b> that has been <b>retrieved</b> and <b>contains the results</b> related to the submitted query is <b>parsed</b> with the aim to <b>fetch the number of the search results</b>.</p>\n<p>To do so, a <b>parsing object</b> (namely, an <code>instance</code> of the <code>BeautifulSoup</code> <b>class</b>) aiming to find the classes having the \"<code>pull-left grey-scale-1 last</code>\" label (this is the section/part of the results page where the number of the search results becomes available) is created. The <b>execution</b> of the <code>find</code> <b>method</b> called on the <b>parsing object</b> will allow to <b>get the record</b> in which the <b>number of the search results</b> is contained.</p>",
"_____no_output_____"
]
],
[
[
"soup = BeautifulSoup(response.content, \"html.parser\")\nnumOfResultsRecord = soup.find(\"div\", class_ = \"pull-left grey-scale-1 last\")",
"_____no_output_____"
]
],
[
[
"<p>The <b>number of the search results</b> is <b>eventually retrieved</b> by <b> splitting</b> the <b>respective record</b> into pieces and <b>retrieving the appropriate one</b> (i.e., <b>piece</b>) after <b>converting it to an integer</b>. A check is also made to <b>figure out the existence</b> of the \"<b>,</b>\" <b>character</b> in the <b>results' number</b>. If <b>this is the case</b>, the \",\" sign is <b>removed</b>.</p>",
"_____no_output_____"
]
],
[
[
"if \",\" in numOfResultsRecord.find(\"p\").find(\"strong\").text.split()[-1]:\n numOfResults = int(numOfResultsRecord.find(\"p\").find(\"strong\").text.split()[-1].replace(\",\", \"\"))",
"_____no_output_____"
]
],
[
[
"<p><b>Displaying</b> the <b>number of the search results</b> that have been retrieved.</p>",
"_____no_output_____"
]
],
[
[
"numOfResults",
"_____no_output_____"
]
],
[
[
"<p>A <b>quick check</b> is done to <b>make sure</b> that <b>there are indeed results that have been retrieved</b> from the <b>execution</b> of the <b>search query</b>. If the <b>number of search results is not 0</b>, then there is a <b>request</b> for the <b>number of the search results to keep</b> (in the case that there are too may and all of them are needed!).</p>",
"_____no_output_____"
]
],
[
[
"if numOfResults != 0:\n numOfResultsToKeep = int(input(\"Type in the number of results to keep: \"))\nelse:\n print(\"No results have been found!\")",
"Type in the number of results to keep: 1000\n"
]
],
[
[
"<p>The section of the script provided below is about the <b>calculation of the number of iterations</b> to be made in order to <b>skim through all the search results to be kept</b> (based on the number of the search results to be kept provided above). This part is necessary because of the fact the search results provided by the AGRIS database become available in batches of 10. The following cases are considered:\n<ul><li>The <b>number of the search results</b> that have been returned is <b>exactly 10</b>.</li>\n <li>The <b>number of the search results</b> that have been returned is <b>more than 0 and less than 10</b>.</li>\n <li>The <b>number of the search results</b> that have been returned is a <b>multiple of 10</b>.</li>\n <li>The <b>number of the search results</b> that have been returned is <b>more than 10 but not an exact multiple of it</b>.</li>\n </ul></p>",
"_____no_output_____"
]
],
[
[
"if (numOfResultsToKeep // 10 == 1): \n numOfIterations = 1\nelif (numOfResultsToKeep // 10 == 0) and (numOfResultsToKeep % 10 > 0 and numOfResultsToKeep % 10 < 10):\n numOfIterations = 1\nelse:\n if numOfResultsToKeep % 10 == 0:\n numOfIterations = numOfResultsToKeep // 10\n else:\n numOfIterations = (numOfResultsToKeep // 10) + 1",
"_____no_output_____"
]
],
[
[
"<p><b>Priniting out</b> the <b>number of the iterations</b> that are <b>needed to retrieve</b> the <b>required number</b> of the <b>search result URLs</b>.</p>",
"_____no_output_____"
]
],
[
[
"numOfIterations",
"_____no_output_____"
]
],
[
[
"<p><b>Creating</b> a <b>text file</b> to <b>store</b> the <b>search result URLs</b>.</p>",
"_____no_output_____"
]
],
[
[
"fileName = input(\"Type in the name of the file to use of storing the query result URLs: \")",
"Type in the name of the file to use of storing the query result URLs: URLS_for_the_AGRIS_dataset\n"
],
[
"fullFileName = fileName + \".txt\"",
"_____no_output_____"
],
[
"file = open (fullFileName, \"w\")",
"_____no_output_____"
]
],
[
[
"<p><b>Iterating</b> over the search results, <b>retrieving</b> the <b>search result URLs</b>, and <b>writing</b>/<b>storing</b> the search result URLs into the text file. To execute the iteration, the <b>index</b> from which <b>results should be scanned from</b> is asked.</p>",
"_____no_output_____"
]
],
[
[
"startIndex = int(input(\"Index to start the retrieval of search results from: \"))",
"Index to start the retrival of search results from: 0\n"
]
],
[
[
"<p><b>Iteration over the search results</b> (from the index that has been provided and on) and <b>storage of the result URLs</b> that get retrieved into the text file.</p>",
"_____no_output_____"
]
],
[
[
"if numOfResultsToKeep >= 10:\n if startIndex == 0:\n iteration = 1\n response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr + \"&\" + \"startIndexSearch=\")\n soup = BeautifulSoup(response.content, \"html.parser\")\n resultUrls = soup.find_all(\"div\", class_=\"col-md-10 col-sm-10 col-xs-12 inner\")\n for resultUrl in resultUrls:\n url = resultUrl.find(\"a\")\n file.write(url[\"href\"] + \"\\n\")\n iteration +=1\n while iteration <= numOfIterations:\n startIndex += 10\n response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr + \"&\" + \"startIndexSearch=\" + str(startIndex))\n soup = BeautifulSoup(response.content, \"html.parser\")\n resultUrls = soup.find_all(\"div\", class_=\"col-md-10 col-sm-10 col-xs-12 inner\")\n for resultUrl in resultUrls:\n url = resultUrl.find(\"a\")\n file.write(url[\"href\"] + \"\\n\")\n iteration +=1\n else:\n iteration = 1\n while iteration <= numOfIterations:\n response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr + \"&\" + \"startIndexSearch=\" + str(startIndex))\n soup = BeautifulSoup(response.content, \"html.parser\")\n resultUrls = soup.find_all(\"div\", class_=\"col-md-10 col-sm-10 col-xs-12 inner\")\n for resultUrl in resultUrls:\n url = resultUrl.find(\"a\")\n file.write(url[\"href\"] + \"\\n\")\n iteration += 1\n startIndex +=10\nelse:\n if startIndex == 0:\n response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr + \"&\" + \"startIndexSearch=\")\n soup = BeautifulSoup(response.content, \"html.parser\")\n resultUrls = soup.find_all(\"div\", class_=\"col-md-10 col-sm-10 col-xs-12 inner\")\n counter = 0\n for resultUrl in resultUrls:\n if counter < numOfResultsToKeep:\n counter +=1\n url = resultUrl.find(\"a\")\n file.write(url[\"href\"] + \"\\n\")\n else:\n break\n else:\n response = requests.get(\"https://agris.fao.org/agris-search/biblio.do?\" + baseQueryStr + \"&\" + \"startIndexSearch=\" + 
str(startIndex))\n soup = BeautifulSoup(response.content, \"html.parser\")\n resultUrls = soup.find_all(\"div\", class_=\"col-md-10 col-sm-10 col-xs-12 inner\")\n counter = 0\n for resultUrl in resultUrls:\n if counter < numOfResultsToKeep:\n counter +=1\n url = resultUrl.find(\"a\")\n file.write(url[\"href\"] + \"\\n\")\n else:\n break",
"_____no_output_____"
]
],
[
[
"<p><b>Closing</b> the text file.</p>",
"_____no_output_____"
]
],
[
[
"file.close()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79bf5a9921ecb454010b70c1dc220150eec9b1b | 10,503 | ipynb | Jupyter Notebook | Applications/ENGR 202 Solver.ipynb | smithrockmaker/ENGR213 | 96e1c9da5aa02d280dd602194a5c7ace55cbd71b | [
"MIT"
] | null | null | null | Applications/ENGR 202 Solver.ipynb | smithrockmaker/ENGR213 | 96e1c9da5aa02d280dd602194a5c7ace55cbd71b | [
"MIT"
] | null | null | null | Applications/ENGR 202 Solver.ipynb | smithrockmaker/ENGR213 | 96e1c9da5aa02d280dd602194a5c7ace55cbd71b | [
"MIT"
] | null | null | null | 24.59719 | 145 | 0.522232 | [
[
[
"# ENGR 202 Solver",
"_____no_output_____"
]
],
[
[
"# importing the needed modules\nimport cmath as c\nimport math as m",
"_____no_output_____"
]
],
[
[
"## Solve for $X_C$ ",
"_____no_output_____"
]
],
[
[
"# Where f is frequency, cap is the value of the capacitor, and xcap is the capacitive reactance\n\nf = 5*10**3\ncap = 50*(10**-9)\nxcap = 1/-(2*m.pi*f*cap)\nprint(\"Xc =\",xcap)",
"Xc = -636.6197723675813\n"
]
],
[
[
"## Solve for $X_L$ ",
"_____no_output_____"
]
],
[
[
"# Where f is the frequency, l is the inductor value, and xind is the inductive reactance\n\nf = 5*10**3\nl = 200*(10**-3)\nxind = 2*m.pi*f*l\nprint(\"XL =\",xind)",
"XL = 6283.185307179587\n"
]
],
[
[
"## Define A complex number in rectangular form",
"_____no_output_____"
]
],
[
[
"# All values except r pulled from previous cells\n# Solutions are given in Rectangular form\n# Negative value for Xc already accounted for\n\nr = 100 # Resistor value\nx_c = r + 1j*(xcap)\nprint(\"For capacitor -\",x_c)\n\nx_i = r + 1j*(xind)\nprint(\"For inductor -\",x_i)",
"For capacitor - (100-636.6197723675813j)\nFor inductor - (100+6283.185307179587j)\n"
]
],
[
[
"## Convert from Rectangular to Polar ",
"_____no_output_____"
]
],
[
[
"# Answers are given in magnitude and radians. Convert if degrees are necessary.\n\ny = c.polar(x_c)\nprint(\"Magnitude, radians\",y)\n\ny = c.polar(x_i)\nprint(\"Magnitude, radians\",y)",
"Magnitude, radians (644.4258953280439, -1.414989826825355)\nMagnitude, radians (6283.981031508405, 1.5548821760954434)\n"
]
],
[
[
"## Convert from Radians to Degrees \nThe above answers will be in radians, use the following code to convert to degrees.",
"_____no_output_____"
]
],
[
[
"#substitute x_c and x_i as needed\n\nz=c.phase(x_c)\nm.degrees(z)\n\nprint(\"Angle in degrees =\",m.degrees(z))",
"Angle in degrees = -81.07294513104007\n"
]
],
[
[
"## Simple Circuit in Series ",
"_____no_output_____"
]
],
[
[
"# For following three cells, if reactance is already given, replace \"xind\" or\"xcap\" with corresponding j value\n# Resistor value is overwritten from previous cells when changed here\n# Not all simple circuits will have all three components. Modify as needed.\n# Original formula - series_comb = r + ind + cap\n\nr = 100 # Resistor Value\nind = 0 + xind*1j\ncap = 0 - xcap*1j\nseries_comb = r + ind + cap\nprint(\"Series Rectangular Form =\",series_comb)",
"Series Rectangular Form = (100+6919.805079547168j)\n"
]
],
[
[
"## Simple Parallel Circuit - Product/Sum",
"_____no_output_____"
]
],
[
[
"# Product sum rule works only with 2 components\n# Original Formula - prod_sum = res*cap/(res + cap)\n\nind = 0 + xind*1j\ncap = 0 + xcap*1j\nres = 100\nprod_sum = res*cap/(res + cap)\nprint(\"Product/sum Rectangular Form =\",prod_sum)",
"Product/sum Rectangular Form = (97.59201358307332-15.329717646080926j)\n"
]
],
[
[
"## Simple Parallel Circuit ",
"_____no_output_____"
]
],
[
[
"# Use as many components as necessary\n# Original formula - parallel_comb = 1/(1/res + 1/ind + 1/cap)\n\nind = 0 + xind*1j\ncap = 0 + xcap*1j\nres = 100\nparallel_comb = 1/(1/res + 1/ind + 1/cap)\nprint(\"Parallel Rectangular Form =\",parallel_comb)",
"Parallel Rectangular Form = (98.04620253923555-13.840607701931122j)\n"
]
],
[
[
"## Current Solver ",
"_____no_output_____"
]
],
[
[
"# Make sure to use the parallel cell that IS NOT product/sum\n# Copy and paste cur_ind or cur_cap sections as necessary to account for all components. Some code modifaction/addition may be required.\n# This cell useful as is for one of each component.\n# Once previous cells are complete, this will populate automatically EXCEPT for E\n\nE = 10 #Equivalent Voltage\nZ_rect = parallel_comb\nZ_polar = c.polar(Z_rect)\nprint(\"Z Polar = \",Z_polar,\"\\n\")\nprint(\" Z Rectangular =\",parallel_comb,\"\\n\")\n\ncur_source = E/Z_rect\ncur_source_p = c.polar(cur_source)\nz=c.phase(cur_source)\nm.degrees(z)\nprint(\"Source Current =\",cur_source,\"\\n\",\"Source Current, Polar =\",cur_source_p,\"\\n\",\"Angle = \",m.degrees(z),\"\\n\")\n\n\ncur_cap = cur_source*Z_rect/cap\ncur_cap_p = c.polar(cur_cap)\nz=c.phase(cur_cap)\nm.degrees(z)\nprint(\"Capacitor Current =\",cur_cap,\"\\n\",\"Capacitor Current, Polar =\",cur_cap_p,\"\\n\",\"Angle = \",m.degrees(z),\"\\n\")\n\n \ncur_ind = cur_source*Z_rect/ind\ncur_ind_p = c.polar(cur_ind)\nz=c.phase(cur_ind)\nm.degrees(z)\nprint(\"inductor Current =\",cur_ind,\"\\n\",\"Inductor Current, Polar =\",cur_ind_p,\"\\n\",\"Angle = \",m.degrees(z),\"\\n\")\n\ncur_res = cur_source*Z_rect/res\ncur_res_p = c.polar(cur_res)\nz=c.phase(cur_res)\nm.degrees(z)\nprint(\"Resistor Current =\",cur_res,\"\\n\",\"Resistor Current, Polar =\",cur_res_p,\"\\n\",\"Angle = \",m.degrees(z),\"\\n\")\n",
"Z Polar = (99.01828242260899, -0.14023751838586943) \n\n Z Rectangular = (98.04620253923555-13.840607701931122j) \n\nSource Current = (0.1+0.014116413837030016j) \n Source Current, Polar = (0.1009914508244054, 0.14023751838586943) \n Angle = 8.035017932898603 \n\nCapacitor Current = (-0+0.015707963267948967j) \n Capacitor Current, Polar = (0.015707963267948967, 1.5707963267948966) \n Angle = 90.0 \n\ninductor Current = -0.0015915494309189533j \n Inductor Current, Polar = (0.0015915494309189533, -1.5707963267948966) \n Angle = -90.0 \n\nResistor Current = (0.1+0j) \n Resistor Current, Polar = (0.1, 0.0) \n Angle = 0.0 \n\n"
]
],
[
[
"## Series-Parallel Circuits",
"_____no_output_____"
]
],
[
[
"# Organization cell for component values\n\n# Inductors\nz1 = 200*1j\n\n# Resistors\nz2 = 300\nz3 = 270\n\n#Capacitors\nz4 = -1500*1j",
"_____no_output_____"
],
[
"# This cell is ambiguous with just z values to make it easy to modify. Keep track of z values.\n# Original Form of equation - parallel_react = 1/(1/z1+1/z2+1/(z3+z4))\n\n\nparallel_react = 1/(1/z1+1/z2+1/(z3+z4))\nparallel_polar = c.polar(parallel_react)\nprint(\"Z Rectangular =\",parallel_react,\"\\n\",\"Z Polar =\",parallel_polar)",
"Z Rectangular = (111.7846057266418+141.10138457782585j) \n Z Polar = (180.01499606210666, 0.9008118071374078)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79c004f4bba11ce6beeba48dcbe96f4c6afd942 | 64,617 | ipynb | Jupyter Notebook | code/notebooks/bsnip001-Copy3.ipynb | zeroknowledgediscovery/zcad | 5642a7ab0ac29337a4066305091811032ab9032b | [
"MIT"
] | null | null | null | code/notebooks/bsnip001-Copy3.ipynb | zeroknowledgediscovery/zcad | 5642a7ab0ac29337a4066305091811032ab9032b | [
"MIT"
] | null | null | null | code/notebooks/bsnip001-Copy3.ipynb | zeroknowledgediscovery/zcad | 5642a7ab0ac29337a4066305091811032ab9032b | [
"MIT"
] | null | null | null | 45.123603 | 13,292 | 0.59424 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn import neighbors, datasets\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.datasets import make_blobs\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom scipy.spatial import ConvexHull\nfrom tqdm import tqdm\nimport random\nplt.style.use('ggplot')\nimport pickle\nfrom sklearn import tree\nfrom sklearn.tree import export_graphviz\nfrom joblib import dump, load\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\n%matplotlib inline\nfrom sklearn.impute import SimpleImputer",
"_____no_output_____"
],
[
"def getAuc(X,y,test_size=0.25,max_depth=None,n_estimators=100,\n minsplit=4,FPR=[],TPR=[],VERBOSE=False, USE_ONLY=None):\n '''\n get AUC given training data X, with target labels y\n '''\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\n CLASSIFIERS=[DecisionTreeClassifier(max_depth=max_depth, min_samples_split=minsplit,class_weight='balanced'),\n RandomForestClassifier(n_estimators=n_estimators,\n max_depth=max_depth,min_samples_split=minsplit,class_weight='balanced'),\n ExtraTreesClassifier(n_estimators=n_estimators,\n max_depth=max_depth,min_samples_split=minsplit,class_weight='balanced'),\n AdaBoostClassifier(n_estimators=n_estimators),\n GradientBoostingClassifier(n_estimators=n_estimators,max_depth=max_depth),\n svm.SVC(kernel='rbf',gamma='scale',class_weight='balanced',probability=True)]\n\n if USE_ONLY is not None:\n if isinstance(USE_ONLY, (list,)):\n CLASSIFIERS=[CLASSIFIERS[i] for i in USE_ONLY]\n if isinstance(USE_ONLY, (int,)):\n CLASSIFIERS=CLASSIFIERS[USE_ONLY]\n\n for clf in CLASSIFIERS:\n clf.fit(X_train,y_train)\n y_pred=clf.predict_proba(X_test)\n #print(X_test,y_pred)\n fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred[:,1], pos_label=1)\n auc=metrics.auc(fpr, tpr)\n if VERBOSE:\n print(auc)\n\n FPR=np.append(FPR,fpr)\n TPR=np.append(TPR,tpr)\n points=np.array([[a[0],a[1]] for a in zip(FPR,TPR)])\n hull = ConvexHull(points)\n x=np.argsort(points[hull.vertices,:][:,0])\n auc=metrics.auc(points[hull.vertices,:][x,0],points[hull.vertices,:][x,1])\n return auc,CLASSIFIERS\n\n\ndef saveFIG(filename='tmp.pdf',AXIS=False):\n '''\n save fig for publication\n '''\n import pylab as plt\n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, \n hspace = 0, wspace = 0)\n plt.margins(0,0)\n if not AXIS:\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.savefig(filename,dpi=300, bbox_inches = 'tight',\n pad_inches = 0,transparent=False) \n return",
"_____no_output_____"
],
[
"def getCoverage(model,verbose=True):\n '''\n return how many distinct items (questions)\n are used in the model set.\n This includes the set of questions being\n covered by all forms that may be \n generated by the model set\n '''\n FS=[]\n for m in model:\n for count in range(len(m.estimators_)):\n clf=m.estimators_[count]\n fs=clf.tree_.feature[clf.tree_.feature>0]\n FS=np.array(list(set(np.append(FS,fs))))\n if verbose:\n print(\"Number of items used: \", FS.size)\n return FS\n\ndef getConfusion(X,y,test_size=0.25,max_depth=None,n_estimators=100,\n minsplit=4,CONFUSION={},VERBOSE=False, USE_ONLY=None,target_names = None):\n '''\n get AUC given training data X, with target labels y\n '''\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\n CLASSIFIERS=[DecisionTreeClassifier(max_depth=max_depth, min_samples_split=minsplit),\n RandomForestClassifier(n_estimators=n_estimators,class_weight='balanced',\n max_depth=max_depth,min_samples_split=minsplit),\n ExtraTreesClassifier(n_estimators=n_estimators,class_weight='balanced',\n max_depth=max_depth,min_samples_split=minsplit),\n AdaBoostClassifier(n_estimators=n_estimators),\n GradientBoostingClassifier(n_estimators=n_estimators,max_depth=max_depth),\n svm.SVC(kernel='rbf',gamma='scale',class_weight='balanced',probability=True)]\n\n if USE_ONLY is not None:\n if isinstance(USE_ONLY, (list,)):\n CLASSIFIERS=[CLASSIFIERS[i] for i in USE_ONLY]\n if isinstance(USE_ONLY, (int,)):\n CLASSIFIERS=CLASSIFIERS[USE_ONLY]\n\n for clf in CLASSIFIERS:\n clf.fit(X_train,y_train)\n y_pred=clf.predict(X_test)\n print(y_test,y_pred)\n cmat=confusion_matrix(y_test, y_pred)\n acc=accuracy_score(y_test, y_pred)\n \n CONFUSION[clf]=cmat\n \n if VERBOSE:\n print(classification_report(y_test, y_pred, target_names=target_names))\n print('Confusion MAtrix:\\n', cmat)\n print(' ')\n print('Accuracy:', acc)\n\n \n return CONFUSION,acc",
"_____no_output_____"
],
[
"df=pd.read_csv('bsnip.csv',index_col=0)\ndf.head()",
"_____no_output_____"
],
[
"# 3 is HC\ndf.Biotype.value_counts()",
"_____no_output_____"
],
[
"#df=df[df['Biotype']==3]\ndf=df.dropna()\ndf0=df",
"_____no_output_____"
],
[
"#df=df0[df0.Biotype.isin([1,5])]\ndf=df0\nX=df.iloc[:,2:].values\ny=df.Biotype.values#.astype(str)\ny5=[(int(x)==5)+0 for x in y ]\ny1=[(int(x)==1)+0 for x in y ]\ny2=[(int(x)==2)+0 for x in y ]\nX.shape",
"_____no_output_____"
],
[
"X_train_, X_test_, y_train_, y_test_ = train_test_split(X, y, test_size=0.2)\ny5=[(int(x)==5)+0 for x in y_train_ ]\ny1=[(int(x)==1)+0 for x in y_train_ ]\ny2=[(int(x)==2)+0 for x in y_train_ ]\n",
"_____no_output_____"
],
[
"ACC=[]\nCLFh={}\nfor run in tqdm(np.arange(500)):\n auc,CLFS=getAuc(X_train_,y5,test_size=0.2,max_depth=10,n_estimators=2,\n minsplit=2,VERBOSE=False, USE_ONLY=[2])\n ACC=np.append(ACC,auc)\n if auc > 0.75:\n CLFh[auc]=CLFS\n#sns.distplot(ACC)\nprint(np.median(ACC))\nCLFstar5=CLFh[np.array([k for k in CLFh.keys()]).max()][0]",
"100%|██████████| 500/500 [00:03<00:00, 132.26it/s]"
],
[
"ACC=[]\nCLFh={}\nfor run in tqdm(np.arange(500)):\n auc,CLFS=getAuc(X_train_,y1,test_size=0.2,max_depth=10,n_estimators=2,\n minsplit=2,VERBOSE=False, USE_ONLY=[2])\n ACC=np.append(ACC,auc)\n if auc > 0.65:\n CLFh[auc]=CLFS\n#sns.distplot(ACC)\nprint(np.median(ACC))\nCLFstar1=CLFh[np.array([k for k in CLFh.keys()]).max()][0]",
"100%|██████████| 500/500 [00:03<00:00, 131.32it/s]"
],
[
"ACC=[]\nCLFh={}\nfor run in tqdm(np.arange(500)):\n auc,CLFS=getAuc(X_train_,y2,test_size=0.2,max_depth=10,n_estimators=2,\n minsplit=2,VERBOSE=False, USE_ONLY=[2])\n ACC=np.append(ACC,auc)\n if auc > 0.75:\n CLFh[auc]=CLFS\n#sns.distplot(ACC)\nprint(np.median(ACC))\nCLFstar2=CLFh[np.array([k for k in CLFh.keys()]).max()][0]",
"100%|██████████| 500/500 [00:03<00:00, 134.36it/s]"
],
[
"y_pred5p=CLFstar5.predict_proba(X_test_)\ny_pred1p=CLFstar1.predict_proba(X_test_)\ny_pred2p=CLFstar2.predict_proba(X_test_)\n\nY=[]\na=1\nfor (i,j,k) in zip(y_pred1p[:,1]**a,y_pred5p[:,1]**a,y_pred2p[:,1]**a):\n idx=np.argmax([i,j,k])\n #print(idx)\n if idx == 0:\n l=1\n Y=np.append(Y,l) \n continue\n if idx == 1:\n l=5\n Y=np.append(Y,l) \n continue\n if idx == 2:\n l=2\n Y=np.append(Y,l) \n continue\nprint(len(Y))\n\nfrom sklearn.metrics import accuracy_score\nACC=accuracy_score(y_test_, Y)\nfrom sklearn.metrics import confusion_matrix\nC=confusion_matrix(y_test_, Y)\nrow_sums = C.sum(axis=1)\nC1 = C / row_sums[:, np.newaxis]\nprint(ACC,C1)",
"309\n0.5307443365695793 [[0.65346535 0.1980198 0.14851485]\n [0.28181818 0.39090909 0.32727273]\n [0.32653061 0.1122449 0.56122449]]\n"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\ny_pred5p=CLFstar5.predict_proba(X_test)\ny_pred1p=CLFstar1.predict_proba(X_test)\ny_pred2p=CLFstar2.predict_proba(X_test)\n\nY=[]\na=-.3\nfor (i,j,k) in zip(y_pred1p[:,1]**a,y_pred5p[:,1]**a,y_pred2p[:,1]**a):\n idx=np.argmax([i,j,k])\n #print(idx)\n if idx == 0:\n l=1\n Y=np.append(Y,l) \n continue\n if idx == 1:\n l=5\n Y=np.append(Y,l) \n continue\n if idx == 2:\n l=2\n Y=np.append(Y,l) \n continue\nprint(len(Y))\n\nfrom sklearn.metrics import accuracy_score\nACC=accuracy_score(y_test, Y)\nfrom sklearn.metrics import confusion_matrix\nC=confusion_matrix(y_test, Y)\nrow_sums = C.sum(axis=1)\nC1 = C / row_sums[:, np.newaxis]\nprint(ACC,C1)",
"771\n0.06874189364461739 [[0.05226481 0.34494774 0.60278746]\n [0.456621 0.08219178 0.46118721]\n [0.53584906 0.38867925 0.0754717 ]]\n"
],
[
"for runs in np.arange(100):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n y_pred5p=CLFstar5.predict_proba(X_test)\n y_pred1p=CLFstar1.predict_proba(X_test)\n y_pred2p=CLFstar2.predict_proba(X_test)\n\n Y=[]\n a=1\n for (i,j,k) in zip(y_pred1p[:,1]**a,y_pred5p[:,1]**a,y_pred2p[:,1]**a):\n idx=np.argmax([i,j,k])\n #print(idx)\n if idx == 0:\n l=1\n Y=np.append(Y,l) \n continue\n if idx == 1:\n l=5\n Y=np.append(Y,l) \n continue\n if idx == 2:\n l=2\n Y=np.append(Y,l) \n continue\n\n from sklearn.metrics import accuracy_score\n ACC=accuracy_score(y_test, Y)\n from sklearn.metrics import confusion_matrix\n C=confusion_matrix(y_test, Y)\n row_sums = C.sum(axis=1)\n C1 = C / row_sums[:, np.newaxis]\n print(ACC,C1)",
"0.8058252427184466 [[0.87155963 0.05504587 0.0733945 ]\n [0.14736842 0.64210526 0.21052632]\n [0.0952381 0.01904762 0.88571429]]\n0.8187702265372169 [[0.83185841 0.09734513 0.07079646]\n [0.06593407 0.76923077 0.16483516]\n [0.1047619 0.04761905 0.84761905]]\n0.8155339805825242 [[0.8559322 0.06779661 0.07627119]\n [0.13483146 0.74157303 0.12359551]\n [0.1372549 0.02941176 0.83333333]]\n0.8414239482200647 [[0.90983607 0.04918033 0.04098361]\n [0.1011236 0.76404494 0.13483146]\n [0.12244898 0.05102041 0.82653061]]\n0.8414239482200647 [[0.87704918 0.06557377 0.05737705]\n [0.10227273 0.79545455 0.10227273]\n [0.14141414 0.02020202 0.83838384]]\n0.8122977346278317 [[0.83606557 0.08196721 0.08196721]\n [0.11627907 0.69767442 0.18604651]\n [0.08910891 0.02970297 0.88118812]]\n0.8090614886731392 [[0.86885246 0.05737705 0.07377049]\n [0.11956522 0.68478261 0.19565217]\n [0.08421053 0.06315789 0.85263158]]\n0.8220064724919094 [[0.864 0.064 0.072 ]\n [0.12903226 0.74193548 0.12903226]\n [0.08791209 0.06593407 0.84615385]]\n0.7896440129449838 [[0.78947368 0.10526316 0.10526316]\n [0.08695652 0.73913043 0.17391304]\n [0.08737864 0.0776699 0.83495146]]\n0.8576051779935275 [[0.86554622 0.08403361 0.05042017]\n [0.09638554 0.8313253 0.07228916]\n [0.11214953 0.01869159 0.86915888]]\n0.8155339805825242 [[0.85245902 0.05737705 0.09016393]\n [0.125 0.7875 0.0875 ]\n [0.1682243 0.03738318 0.79439252]]\n0.8317152103559871 [[0.84 0.08 0.08 ]\n [0.12048193 0.74698795 0.13253012]\n [0.04950495 0.05940594 0.89108911]]\n0.8317152103559871 [[0.84955752 0.07079646 0.07964602]\n [0.07865169 0.78651685 0.13483146]\n [0.07476636 0.07476636 0.85046729]]\n0.8349514563106796 [[0.88709677 0.07258065 0.04032258]\n [0.15053763 0.68817204 0.16129032]\n [0.0326087 0.05434783 0.91304348]]\n0.8058252427184466 [[0.88695652 0.04347826 0.06956522]\n [0.11 0.74 0.15 ]\n [0.14893617 0.07446809 0.77659574]]\n0.8122977346278317 [[0.90566038 0.05660377 0.03773585]\n [0.18269231 0.71153846 0.10576923]\n 
[0.14141414 0.04040404 0.81818182]]\n0.8187702265372169 [[0.83846154 0.1 0.06153846]\n [0.07142857 0.77380952 0.1547619 ]\n [0.09473684 0.07368421 0.83157895]]\n0.7961165048543689 [[0.85123967 0.11570248 0.03305785]\n [0.11494253 0.71264368 0.17241379]\n [0.15841584 0.03960396 0.8019802 ]]\n0.8284789644012945 [[0.82051282 0.09401709 0.08547009]\n [0.10638298 0.74468085 0.14893617]\n [0.04081633 0.04081633 0.91836735]]\n0.7799352750809061 [[0.79365079 0.12698413 0.07936508]\n [0.15294118 0.68235294 0.16470588]\n [0.12244898 0.03061224 0.84693878]]\n0.7928802588996764 [[0.8 0.10434783 0.09565217]\n [0.13333333 0.71111111 0.15555556]\n [0.10576923 0.03846154 0.85576923]]\n0.8381877022653722 [[0.88617886 0.08130081 0.03252033]\n [0.13541667 0.77083333 0.09375 ]\n [0.13333333 0.02222222 0.84444444]]\n0.8381877022653722 [[0.93693694 0.04504505 0.01801802]\n [0.1010101 0.68686869 0.21212121]\n [0.07070707 0.05050505 0.87878788]]\n0.8414239482200647 [[0.83760684 0.07692308 0.08547009]\n [0.09210526 0.77631579 0.13157895]\n [0.07758621 0.03448276 0.88793103]]\n0.8058252427184466 [[0.8487395 0.08403361 0.06722689]\n [0.09782609 0.75 0.15217391]\n [0.12244898 0.07142857 0.80612245]]\n0.8317152103559871 [[0.86440678 0.05084746 0.08474576]\n [0.1122449 0.75510204 0.13265306]\n [0.06451613 0.06451613 0.87096774]]\n0.7961165048543689 [[0.82857143 0.1047619 0.06666667]\n [0.0952381 0.73333333 0.17142857]\n [0.12121212 0.05050505 0.82828283]]\n0.8381877022653722 [[0.87603306 0.05785124 0.0661157 ]\n [0.1011236 0.79775281 0.1011236 ]\n [0.14141414 0.03030303 0.82828283]]\n0.8220064724919094 [[0.85470085 0.05982906 0.08547009]\n [0.11702128 0.72340426 0.15957447]\n [0.08163265 0.04081633 0.87755102]]\n0.8349514563106796 [[0.89166667 0.075 0.03333333]\n [0.09411765 0.72941176 0.17647059]\n [0.10576923 0.03846154 0.85576923]]\n0.8187702265372169 [[0.86206897 0.0862069 0.05172414]\n [0.11340206 0.72164948 0.16494845]\n [0.10416667 0.03125 0.86458333]]\n0.8058252427184466 [[0.84482759 
0.06896552 0.0862069 ]\n [0.18666667 0.69333333 0.12 ]\n [0.11016949 0.05084746 0.83898305]]\n0.8317152103559871 [[0.85074627 0.10447761 0.04477612]\n [0.10588235 0.74117647 0.15294118]\n [0.1 0.01111111 0.88888889]]\n0.8058252427184466 [[0.84722222 0.0625 0.09027778]\n [0.11392405 0.74683544 0.13924051]\n [0.12790698 0.08139535 0.79069767]]\n0.7766990291262136 [[0.82307692 0.09230769 0.08461538]\n [0.09090909 0.7012987 0.20779221]\n [0.15686275 0.06862745 0.7745098 ]]\n0.8090614886731392 [[0.81415929 0.08849558 0.09734513]\n [0.08695652 0.79347826 0.11956522]\n [0.14423077 0.03846154 0.81730769]]\n0.8058252427184466 [[0.86259542 0.08396947 0.05343511]\n [0.1097561 0.7195122 0.17073171]\n [0.10416667 0.09375 0.80208333]]\n0.8090614886731392 [[0.85365854 0.06504065 0.08130081]\n [0.12162162 0.74324324 0.13513514]\n [0.10714286 0.08928571 0.80357143]]\n0.8187702265372169 [[0.84482759 0.06034483 0.09482759]\n [0.12941176 0.69411765 0.17647059]\n [0.05555556 0.05555556 0.88888889]]\n0.8155339805825242 [[0.85123967 0.09090909 0.05785124]\n [0.13636364 0.72727273 0.13636364]\n [0.1 0.05 0.85 ]]\n0.7702265372168284 [[0.8030303 0.12878788 0.06818182]\n [0.1686747 0.68674699 0.14457831]\n [0.14893617 0.05319149 0.79787234]]\n0.8090614886731392 [[0.87755102 0.10204082 0.02040816]\n [0.16037736 0.68867925 0.1509434 ]\n [0.0952381 0.03809524 0.86666667]]\n0.8414239482200647 [[0.84615385 0.06153846 0.09230769]\n [0.06593407 0.81318681 0.12087912]\n [0.09090909 0.04545455 0.86363636]]\n0.7961165048543689 [[0.87179487 0.05128205 0.07692308]\n [0.16666667 0.69607843 0.1372549 ]\n [0.11111111 0.07777778 0.81111111]]\n0.8349514563106796 [[0.8559322 0.05932203 0.08474576]\n [0.09433962 0.76415094 0.14150943]\n [0.08235294 0.02352941 0.89411765]]\n0.8220064724919094 [[0.86666667 0.06666667 0.06666667]\n [0.08433735 0.77108434 0.14457831]\n [0.12264151 0.06603774 0.81132075]]\n0.7896440129449838 [[0.83739837 0.07317073 0.08943089]\n [0.14117647 0.67058824 0.18823529]\n [0.11881188 
0.04950495 0.83168317]]\n0.8414239482200647 [[0.8880597 0.04477612 0.06716418]\n [0.10843373 0.77108434 0.12048193]\n [0.10869565 0.05434783 0.83695652]]\n0.8122977346278317 [[0.84920635 0.07936508 0.07142857]\n [0.13483146 0.75280899 0.11235955]\n [0.10638298 0.07446809 0.81914894]]\n0.7928802588996764 [[0.84347826 0.09565217 0.06086957]\n [0.14583333 0.69791667 0.15625 ]\n [0.12244898 0.05102041 0.82653061]]\n0.7831715210355987 [[0.81896552 0.10344828 0.07758621]\n [0.13333333 0.7 0.16666667]\n [0.12621359 0.05825243 0.81553398]]\n0.8220064724919094 [[0.83739837 0.04065041 0.12195122]\n [0.11827957 0.75268817 0.12903226]\n [0.08602151 0.04301075 0.87096774]]\n0.7864077669902912 [[0.78333333 0.10833333 0.10833333]\n [0.15909091 0.68181818 0.15909091]\n [0.04950495 0.06930693 0.88118812]]\n0.8025889967637541 [[0.83050847 0.05084746 0.11864407]\n [0.15 0.725 0.125 ]\n [0.08108108 0.09009009 0.82882883]]\n0.7993527508090615 [[0.83333333 0.09649123 0.07017544]\n [0.14141414 0.73737374 0.12121212]\n [0.125 0.05208333 0.82291667]]\n0.8090614886731392 [[0.85245902 0.09836066 0.04918033]\n [0.12903226 0.67741935 0.19354839]\n [0.10638298 0.0106383 0.88297872]]\n0.8058252427184466 [[0.90654206 0.02803738 0.06542056]\n [0.19 0.71 0.1 ]\n [0.16666667 0.03921569 0.79411765]]\n0.7864077669902912 [[0.79230769 0.07692308 0.13076923]\n [0.1369863 0.69863014 0.16438356]\n [0.12264151 0.03773585 0.83962264]]\n0.8187702265372169 [[0.89922481 0.04651163 0.05426357]\n [0.16666667 0.67948718 0.15384615]\n [0.14705882 0.02941176 0.82352941]]\n0.8317152103559871 [[0.85245902 0.10655738 0.04098361]\n [0.09756098 0.79268293 0.1097561 ]\n [0.13333333 0.02857143 0.83809524]]\n0.8220064724919094 [[0.89830508 0.05084746 0.05084746]\n [0.16326531 0.70408163 0.13265306]\n [0.09677419 0.05376344 0.84946237]]\n0.8058252427184466 [[0.88034188 0.06837607 0.05128205]\n [0.09183673 0.69387755 0.21428571]\n [0.14893617 0.0212766 0.82978723]]\n0.8511326860841424 [[0.89230769 0.06923077 0.03846154]\n 
[0.1097561 0.7804878 0.1097561 ]\n [0.11340206 0.03092784 0.8556701 ]]\n0.8090614886731392 [[0.81651376 0.08256881 0.10091743]\n [0.08910891 0.75247525 0.15841584]\n [0.12121212 0.02020202 0.85858586]]\n0.8446601941747572 [[0.8828125 0.078125 0.0390625 ]\n [0.09411765 0.72941176 0.17647059]\n [0.07291667 0.03125 0.89583333]]\n0.8317152103559871 [[0.89344262 0.04098361 0.06557377]\n [0.1375 0.7375 0.125 ]\n [0.10280374 0.06542056 0.8317757 ]]\n0.7766990291262136 [[0.84070796 0.09734513 0.0619469 ]\n [0.14606742 0.69662921 0.15730337]\n [0.14018692 0.08411215 0.77570093]]\n0.8349514563106796 [[0.83064516 0.05645161 0.11290323]\n [0.09195402 0.7816092 0.12643678]\n [0.07142857 0.04081633 0.8877551 ]]\n"
],
[
"RUNS=500\nA=[]\nfor run in tqdm(np.arange(RUNS)):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n y_pred5=CLFstar5.predict(X_test)\n y_pred1=CLFstar1.predict(X_test)\n Y=[]\n for (i,j) in zip(y_pred1,y_pred5):\n if i==0 and j==1:\n k=5\n if i==1 and j==0:\n k=1\n if i==0 and j==0:\n k=2\n if i==1 and j==1:\n k=1\n Y=np.append(Y,k) \n A=np.append(A,accuracy_score(y_test, Y))\nsns.distplot(A)",
"100%|██████████| 500/500 [00:02<00:00, 170.57it/s]\n"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\ny_pred5=CLFstar5.predict(X)\ny_pred1=CLFstar1.predict(X)\nY=[]\nfor (i,j) in zip(y_pred1,y_pred5):\n if i==0 and j==1:\n k=5\n if i==1 and j==0:\n k=1\n if i==0 and j==0:\n k=2\n if i==1 and j==1:\n k=1\n Y=np.append(Y,k) \nACC=accuracy_score(y_test, Y)\nfrom sklearn.metrics import confusion_matrix\nC=confusion_matrix(y_test, Y)",
"_____no_output_____"
],
[
"C",
"_____no_output_____"
],
[
"def getCoverage(model,verbose=True):\n '''\n return how many distinct items (questions)\n are used in the model set.\n This includes the set of questions being\n covered by all forms that may be \n generated by the model set\n '''\n FS=[]\n for m in model:\n for count in range(len(m.estimators_)):\n clf=m.estimators_[count]\n fs=clf.tree_.feature[clf.tree_.feature>0]\n FS=np.array(list(set(np.append(FS,fs))))\n if verbose:\n print(\"Number of items used: \", FS.size)\n return FS",
"_____no_output_____"
],
[
"getCoverage(CLFstar1)",
"_____no_output_____"
],
[
"A=CLFstar1.estimators_[0]\na=' '.join(list(A.tree_.feature.astype(str))).split('-2')\nb=[len(x.split()) for x in a if x != ' ']\nnp.array(b).sum()*(10/2**9)\n#len(list(set(A.tree_.feature[A.tree_.feature>0])))",
"_____no_output_____"
],
[
"def getAvLen(clf):\n s=0\n for e in clf.estimators_:\n a=' '.join(list(e.tree_.feature.astype(str))).split('-2')\n b=[len(x.split()) for x in a if x != ' ']\n s=s+np.array(b).sum()*(10/2**9)\n return s",
"_____no_output_____"
],
[
"getAvLen(CLFstar1)",
"_____no_output_____"
],
[
"getAvLen(CLFstar5)",
"_____no_output_____"
],
[
"getAvLen(CLFstar2)+getAvLen(CLFstar5)",
"_____no_output_____"
],
[
"def drawTrees(model):\n '''\n draw the estimators (trees)\n in a single model\n '''\n N=len(model.estimators_)\n\n for count in range(N):\n estimator = model.estimators_[count]\n\n export_graphviz(estimator, out_file='tmptree.dot', \n #feature_names = iris.feature_names,\n #class_names = iris.target_names,\n rounded = True, proportion = False, \n precision = 2, filled = True)\n\n from subprocess import call\n call(['dot', '-Tpng', 'tmptree.dot', '-o', 'tmptree'+str(count)+'.png', '-Gdpi=600'])\n from IPython.display import Image\n Image(filename = 'tmptree'+str(count)+'.png') ",
"_____no_output_____"
],
[
"drawTrees(CLFstar5)",
"_____no_output_____"
],
[
"from scipy import interpolate\nfrom scipy.interpolate import interp1d\nauc_=[]\nROC={}\nfpr_ = np.linspace(0, 1, num=20, endpoint=True)\nfor run in np.arange(1000):\n clf=CLFstar\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\n y_pred=clf.predict_proba(X_test)\n fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred[:,1], pos_label=1)\n f = interp1d(fpr, tpr)\n auc_=np.append(auc_,metrics.auc(fpr_, f(fpr_)))\n ROC[metrics.auc(fpr, tpr)]={'fpr':fpr_,'tpr':f(fpr_)}\nsns.distplot(auc_)\nauc_.mean()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79c25bc007738578a5dd5ba0fe3bd83969514c2 | 196,291 | ipynb | Jupyter Notebook | WeatherWork.ipynb | bgreenawald/ML-Final | c2938bcb738a7faff8a301495c32842428721a04 | [
"MIT"
] | null | null | null | WeatherWork.ipynb | bgreenawald/ML-Final | c2938bcb738a7faff8a301495c32842428721a04 | [
"MIT"
] | null | null | null | WeatherWork.ipynb | bgreenawald/ML-Final | c2938bcb738a7faff8a301495c32842428721a04 | [
"MIT"
] | null | null | null | 42.340595 | 175 | 0.286172 | [
[
[
"%matplotlib inline\n\nfrom functools import reduce\nimport datetime\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nfrom sklearn.preprocessing import MinMaxScaler",
"_____no_output_____"
],
[
"# Filenames\ncity_file = 'city_attributes.csv'\ntemp_file = 'temperature.csv'\nhumid_file = 'humidity.csv'\npress_file = 'pressure.csv'\ndesc_file = 'weather_description.csv'\nwdir_file = 'wind_direction.csv'\nwspeed_file = 'wind_speed.csv'\n# Load the files\ncity_df = pd.read_csv(city_file)\ncity_df.rename(str.lower, axis = 'columns', inplace = True)\ncity_df.drop(['country'], axis = 1, inplace = True)\ncity_df.set_index(['city'], inplace = True)\ntemp_df = pd.read_csv(temp_file)\nhumid_df = pd.read_csv(humid_file)\npress_df = pd.read_csv(press_file)\ndesc_df = pd.read_csv(desc_file)\nwdir_df = pd.read_csv(wdir_file)\nwspeed_df = pd.read_csv(wspeed_file)",
"_____no_output_____"
],
[
"# These are the cities that universally have > 1% missing across all weather values\ndrop_city = set(temp_df.columns[temp_df.isna().sum() > 500]) & \\\nset(humid_df.columns[humid_df.isna().sum() > 500]) & \\\nset(press_df.columns[press_df.isna().sum() > 500]) & \\\nset(desc_df.columns[desc_df.isna().sum() > 500]) & \\\nset(wdir_df.columns[wdir_df.isna().sum() > 500]) & \\\nset(wspeed_df.columns[wspeed_df.isna().sum() > 500]) ",
"_____no_output_____"
],
[
"# Remove the undesired cities and melt the tables to be conducive for joining\nalt_temp_df = pd.melt(temp_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'temperature')\nalt_humid_df = pd.melt(humid_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'humidity')\nalt_press_df = pd.melt(press_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'pressure')\nalt_desc_df = pd.melt(desc_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'weather_description')\nalt_wdir_df = pd.melt(wdir_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'wind_direction')\nalt_wspeed_df = pd.melt(wspeed_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'wind_speed')\n# Set proper indices\nalt_temp_df = alt_temp_df.set_index(['city', 'datetime'])\nalt_humid_df = alt_humid_df.set_index(['city', 'datetime'])\nalt_press_df = alt_press_df.set_index(['city', 'datetime'])\nalt_desc_df = alt_desc_df.set_index(['city', 'datetime'])\nalt_wdir_df = alt_wdir_df.set_index(['city', 'datetime'])\nalt_wspeed_df = alt_wspeed_df.set_index(['city', 'datetime'])",
"_____no_output_____"
],
[
"# Join tables on the city and datetime info\ndfs = [city_df, alt_temp_df, alt_humid_df, alt_press_df, alt_wspeed_df, alt_wdir_df, alt_desc_df]\ndf_final = reduce(lambda left, right : pd.merge(left, right, left_index = True, right_index = True), dfs)",
"_____no_output_____"
],
[
"# INTERPOLATION HAPPENS HERE -- Break up by city\ndf_final = df_final.groupby('city').apply(lambda group: group.interpolate(limit_direction = 'both'))\n\n# Need to do something special for weather_description\narr, cat = df_final['weather_description'].factorize()\ndf_final['weather_description'] = pd.Series(arr).replace(-1, np.nan).\\\ninterpolate(method = 'nearest', limit_direction = 'both')\\\n.interpolate(limit_direction = 'both').astype('category')\\\n.cat.rename_categories(cat).astype('str').values",
"_____no_output_____"
],
[
"# The whole purpose here is to encode wind direction. It's not continuous so don't really want to scale it\n# Also have more granularity in wind dir if need be.\n#dir_df = pd.DataFrame({'dir' : ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N'],\n# 'lower' : [348.75, 11.25, 33.75, 56.25, 78.75, 101.25, 123.75, 146.25, 168.75, 191.25, 213.75, 236.25, 258.75, 281.25, 303.75, 326.25, 0],\n# 'upper' : [360, 33.75, 56.25, 78.75, 101.25, 123.75, 146.25, 168.75, 191.25, 213.75, 236.25, 258.75, 281.25, 303.75, 326.25, 348.75, 11.25]})\ndir_df = pd.DataFrame({'dir' : ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'],\n 'lower' : [337.5, 22.5, 67.5, 112.5, 157, 202.5, 247.5, 292.5, 0],\n 'upper' : [360, 67.5, 112.5, 157, 202.5, 247.5, 292.5, 337.5, 22.5]})\n# Make a copy to fool around in\nfill_this = df_final['wind_direction'].copy()\n# And overwrite the copy\nfor i in reversed(range(len(dir_df))):\n# print(str(dir_df.loc[i,'lower']) + \" and \" + str(dir_df.loc[i,'upper']))\n fill_this.loc[df_final['wind_direction'].between(dir_df.loc[i,'lower'], dir_df.loc[i,'upper'])] = i\n# This is a bit ugly here; but it maintains any missing values nicely\ndf_final['wind_direction'] = dir_df.loc[fill_this, 'dir'].values",
"_____no_output_____"
],
[
"# Scaling happens here -- IMPUTATION MUST HAPPEN FIRST\nscale_df = df_final[['latitude', 'longitude', 'temperature', 'humidity', 'pressure', 'wind_speed']].values\nscaler = MinMaxScaler()\n# We have access to min and max so we can transform back and forth\nscale_df = scaler.fit_transform(scale_df)\nprint(scaler.data_min_)\nprint(scaler.data_max_)\ndf_final_scaled = df_final.copy()\ndf_final_scaled[['latitude', 'longitude', 'temperature', 'humidity', 'pressure', 'wind_speed']] = scale_df\ndf_final_scaled.head()",
"[ 29.42412 -122.676208 242.33666667 5. 800.\n 0. ]\n[ 47.606209 -71.059769 321.22 100. 1100. 50. ]\n"
],
[
"# Collapse a lot of these groupings\nweather_dict = {'scattered clouds' : 'partly_cloudy', 'sky is clear' : 'clear', \n 'few clouds' : 'partly_cloudy', 'broken clouds' : 'partly_cloudy',\n 'overcast clouds' : 'cloudy', 'mist' : 'cloudy', 'haze' : 'cloudy', \n 'dust' : 'other', 'fog' : 'cloudy', 'moderate rain' : 'rain',\n 'light rain' : 'rain', 'heavy intensity rain' : 'rain', 'light intensity drizzle' : 'rain',\n 'heavy snow' : 'snow', 'snow' : 'snow', 'light snow' : 'snow', 'very heavy rain' : 'rain',\n 'thunderstorm' : 'tstorm', 'proximity thunderstorm' : 'tstorm', 'smoke' : 'other', 'freezing rain' : 'snow',\n 'thunderstorm with light rain' : 'tstorm', 'drizzle' : 'rain', 'sleet' : 'snow',\n 'thunderstorm with rain' : 'tstorm', 'thunderstorm with heavy rain' : 'tstorm',\n 'squalls' : 'rain', 'heavy intensity drizzle' : 'rain', 'light shower snow' : 'snow',\n 'light intensity shower rain' : 'rain', 'shower rain' : 'rain',\n 'heavy intensity shower rain' : 'rain', 'proximity shower rain' : 'rain',\n 'proximity sand/dust whirls' : 'other', 'proximity moderate rain' : 'rain', 'sand' : 'other',\n 'shower snow' : 'snow', 'proximity thunderstorm with rain' : 'tstorm',\n 'sand/dust whirls' : 'other', 'proximity thunderstorm with drizzle' : 'tstorm',\n 'thunderstorm with drizzle' : 'tstorm', 'thunderstorm with light drizzle' : 'tstorm',\n 'light rain and snow' : 'snow', 'thunderstorm with heavy drizzle' : 'tstorm',\n 'ragged thunderstorm' : 'tstorm', 'tornado' : 'other', 'volcanic ash' : 'other', 'shower drizzle' : 'rain',\n 'heavy shower snow' : 'snow', 'light intensity drizzle rain' : 'rain',\n 'light shower sleet' : 'snow', 'rain and snow' : 'snow'}",
"_____no_output_____"
],
[
"adj_weather = [weather_dict[val] for val in df_final_scaled['weather_description']]\ndf_final_scaled['adj_weather'] = adj_weather\ndf_final_scaled = df_final_scaled.drop('weather_description', axis = 1)",
"_____no_output_____"
],
[
"# And one-hot encode the wind_directions and weather_description\ndf_final_scaled = pd.get_dummies(df_final_scaled, prefix=['wind_dir', 'weather'], columns=['wind_direction', 'adj_weather'])",
"_____no_output_____"
],
[
"df_final_scaled = df_final_scaled.reset_index('city')\n# Train, test, valid split\ntrain_df = df_final_scaled[df_final_scaled.index < '2016-01-01 00:00:00']\ntest_df = df_final_scaled[df_final_scaled.index >= '2016-01-01 00:00:00']\ntest_df = test_df[test_df.index < '2017-01-01 00:00:00']\nvalid_df = df_final_scaled[df_final_scaled.index >= '2017-01-01 00:00:00']",
"_____no_output_____"
],
[
"train_df",
"_____no_output_____"
],
[
"# Write for distribution\n#df_final_scaled.to_csv('~/Downloads/df_weather_scaled_encoded')",
"_____no_output_____"
],
[
"practice = test_df[test_df['city'] == 'Philadelphia']",
"_____no_output_____"
],
[
"practice.shape",
"_____no_output_____"
],
[
"seq_len = 24\n\n",
"_____no_output_____"
],
[
"# NEED TO\n# Perform imputation on missing values -- Probably by city and day -- DONE\n# Join the tables -- DONE\n# Do min-max scaling -- DONE\n# Roll up the values to the daily level -- NOT DOING (this isn't what we were planning on doing in our proposal)\n# Encode the weather_description and wind direction as a one-hot -- DONE\n# Get the wind direction as a categorical -- DONE\n\n# Pretty good. Have some more to do now\n# Separate into training, testing, and validation --DONE\n# Fully break up the data into the Xtrain, Xtest, Xvalid, Ytrain, Ytest, and Yvalid",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79c329c92820627db09362ee5ca424bc8cbdba5 | 11,747 | ipynb | Jupyter Notebook | Image Classification/CGIAR Computer Vision for Crop Disease/recongratulationsyoure3iniclrworkshopchallenge1/model4.ipynb | ZindiAfrica/Computer-Vision | bf4c00a0633506270dc6d07df938a100a10ee799 | [
"MIT"
] | null | null | null | Image Classification/CGIAR Computer Vision for Crop Disease/recongratulationsyoure3iniclrworkshopchallenge1/model4.ipynb | ZindiAfrica/Computer-Vision | bf4c00a0633506270dc6d07df938a100a10ee799 | [
"MIT"
] | null | null | null | Image Classification/CGIAR Computer Vision for Crop Disease/recongratulationsyoure3iniclrworkshopchallenge1/model4.ipynb | ZindiAfrica/Computer-Vision | bf4c00a0633506270dc6d07df938a100a10ee799 | [
"MIT"
] | null | null | null | 11,747 | 11,747 | 0.687239 | [
[
[
"import numpy as np\nimport pandas as pd\nimport os\nimport random, re, math\nimport tensorflow as tf, tensorflow.keras.backend as K\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import optimizers\nfrom kaggle_datasets import KaggleDatasets\n\nfrom numpy.random import seed\nseed(2020)\ntf.random.set_seed(2020)\n\nprint(tf.__version__)\nprint(tf.keras.__version__)",
"_____no_output_____"
],
[
"!pip install efficientnet\nimport efficientnet.tfkeras as efn",
"_____no_output_____"
]
],
[
[
"#### TPU",
"_____no_output_____"
]
],
[
[
"AUTO = tf.data.experimental.AUTOTUNE\n# Detect hardware, return appropriate distribution strategy\ntry:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection. No parameters necessary if TPU_NAME environment variable is set. On Kaggle this is always the case.\n print('Running on TPU ', tpu.master())\nexcept ValueError:\n tpu = None\n\nif tpu:\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.experimental.TPUStrategy(tpu)\nelse:\n strategy = tf.distribute.get_strategy() # default distribution strategy in Tensorflow. Works on CPU and single GPU.\n\nprint(\"REPLICAS: \", strategy.num_replicas_in_sync)\n\n\n# Data access\nGCS_DS_PATH = KaggleDatasets().get_gcs_path()",
"_____no_output_____"
],
[
"from matplotlib import pyplot as plt\n\nimg = plt.imread('/kaggle/input/zindi-disease/train/healthy_wheat/4YI63K.jpg')\n\nprint(img.shape)\nplt.imshow(img)",
"_____no_output_____"
],
[
"path='/kaggle/input/zindi-disease/'",
"_____no_output_____"
],
[
"sub = pd.read_csv(path + 'sample_submission.csv')\n\nprint(len(sub))\nsub.head() ",
"_____no_output_____"
],
[
"dic = {}\nfor _,_, filenames in os.walk(path + '/test/'):\n for file in filenames:\n name, extension = os.path.splitext(file)\n dic[name] = extension\n \ntest = sub[['ID']].copy()\ntest['ID']=test['ID'].apply(lambda x: x + dic[x])\ntest['ID']='/test/' + test['ID']\n\nprint(len(test))\ntest.head()",
"_____no_output_____"
],
[
"plt.imshow(plt.imread(path + 'test/643083.JPG'))",
"_____no_output_____"
],
[
"test_paths = test.ID.apply(lambda x: GCS_DS_PATH + x).values",
"_____no_output_____"
],
[
"train=pd.DataFrame(columns=['ID','leaf_rust','stem_rust','healthy_wheat'])\ntrain.head() ",
"_____no_output_____"
],
[
"fn_hw = []\nfor _,_, filenames in os.walk(path + '/train/healthy_wheat/'):\n for filename in filenames:\n fn_hw.append('/train/healthy_wheat/' + filename)\n \nd_hw = {'ID': fn_hw, 'leaf_rust': 0, 'stem_rust':0, 'healthy_wheat':1}\n\ntrain=train.append(pd.DataFrame(d_hw))\nprint(len(train))\ntrain.head() ",
"_____no_output_____"
],
[
"fn_lr = []\nfor _,_, filenames in os.walk(path + '/train/leaf_rust/'):\n for filename in filenames:\n fn_lr.append('/train/leaf_rust/' + filename)\n\nd_lr = {'ID': fn_lr, 'leaf_rust': 1, 'stem_rust':0, 'healthy_wheat':0}\ntrain=train.append(pd.DataFrame(d_lr))\n\nprint(len(train))\ntrain.tail() ",
"_____no_output_____"
],
[
"fn_sr = []\nfor _,_, filenames in os.walk(path + '/train/stem_rust/'):\n for filename in filenames:\n fn_sr.append('/train/stem_rust/' + filename)\n \nd_sr = {'ID': fn_sr, 'leaf_rust': 0, 'stem_rust':1, 'healthy_wheat':0}\ntrain=train.append(pd.DataFrame(d_sr))\n\nprint(len(train))\ntrain.tail()",
"_____no_output_____"
],
[
"train_paths = train.ID.apply(lambda x: GCS_DS_PATH + x).values",
"_____no_output_____"
],
[
"train_labels = train.loc[:, 'leaf_rust':].astype('int64').values\ntrain_labels",
"_____no_output_____"
],
[
"type(train_labels[0][0])",
"_____no_output_____"
],
[
"nb_classes = 3\nBATCH_SIZE = 8 * strategy.num_replicas_in_sync\nimg_size = 768\nEPOCHS = 40",
"_____no_output_____"
],
[
"def decode_image(filename, label=None, image_size=(img_size, img_size)):\n bits = tf.io.read_file(filename)\n image = tf.image.decode_jpeg(bits, channels=3)\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.image.resize(image, image_size)\n if label is None:\n return image\n else:\n return image, label\n \ndef data_augment(image, label=None, seed=2020):\n image = tf.image.random_flip_left_right(image, seed=seed)\n image = tf.image.random_flip_up_down(image, seed=seed)\n \n if label is None:\n return image\n else:\n return image, label",
"_____no_output_____"
],
[
"def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n # returns 3x3 transformmatrix which transforms indicies\n \n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n shear = math.pi * shear / 180.\n \n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1],dtype='float32')\n zero = tf.constant([0],dtype='float32')\n rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3] )\n \n # SHEAR MATRIX\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n shear_matrix = tf.reshape( tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3] ) \n \n # ZOOM MATRIX\n zoom_matrix = tf.reshape( tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3] )\n \n # SHIFT MATRIX\n shift_matrix = tf.reshape( tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3] )\n \n return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix))",
"_____no_output_____"
],
[
"def transform(image,label):\n # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]\n # output - image randomly rotated, sheared, zoomed, and shifted\n DIM = img_size\n XDIM = DIM%2 #fix for size 331\n \n rot = 15. * tf.random.normal([1],dtype='float32')\n shr = 5. * tf.random.normal([1],dtype='float32') \n h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10.\n w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10.\n h_shift = 16. * tf.random.normal([1],dtype='float32') \n w_shift = 16. * tf.random.normal([1],dtype='float32') \n \n # GET TRANSFORMATION MATRIX\n m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) \n\n # LIST DESTINATION PIXEL INDICES\n x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )\n y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )\n z = tf.ones([DIM*DIM],dtype='int32')\n idx = tf.stack( [x,y,z] )\n \n # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS\n idx2 = K.dot(m,tf.cast(idx,dtype='float32'))\n idx2 = K.cast(idx2,dtype='int32')\n idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)\n \n # FIND ORIGIN PIXEL VALUES \n idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )\n d = tf.gather_nd(image,tf.transpose(idx3))\n \n return tf.reshape(d,[DIM,DIM,3]),label",
"_____no_output_____"
],
[
"train_dataset = (\n tf.data.Dataset\n .from_tensor_slices((train_paths, train_labels))\n .map(decode_image, num_parallel_calls=AUTO)\n .map(data_augment, num_parallel_calls=AUTO)\n .map(transform, num_parallel_calls=AUTO)\n .repeat()\n .shuffle(512)\n .batch(BATCH_SIZE)\n .prefetch(AUTO)\n )",
"_____no_output_____"
],
[
"test_dataset = (\n tf.data.Dataset\n .from_tensor_slices(test_paths)\n .map(decode_image, num_parallel_calls=AUTO)\n .batch(BATCH_SIZE)\n)",
"_____no_output_____"
],
[
"LR_START = 0.00001\nLR_MAX = 0.0001 * strategy.num_replicas_in_sync\nLR_MIN = 0.00001\nLR_RAMPUP_EPOCHS = 25\nLR_SUSTAIN_EPOCHS = 3\nLR_EXP_DECAY = .8\n\ndef lrfn(epoch):\n if epoch < LR_RAMPUP_EPOCHS:\n lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START\n elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:\n lr = LR_MAX\n else:\n lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN\n return lr\n \nlr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)\n\nrng = [i for i in range(EPOCHS)]\ny = [lrfn(x) for x in rng]\nplt.plot(rng, y)\nprint(\"Learning rate schedule: {:.3g} to {:.3g} to {:.3g}\".format(y[0], max(y), y[-1]))",
"_____no_output_____"
],
[
"def get_model():\n base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(img_size, img_size, 3))\n x = base_model.output\n predictions = Dense(nb_classes, activation=\"softmax\")(x)\n return Model(inputs=base_model.input, outputs=predictions)",
"_____no_output_____"
],
[
"with strategy.scope():\n model = get_model()\n \nmodel.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])",
"_____no_output_____"
],
[
"%%time\nmodel.fit(\n train_dataset, \n steps_per_epoch=train_labels.shape[0] // BATCH_SIZE,\n callbacks=[lr_callback],\n epochs=EPOCHS\n)",
"_____no_output_____"
],
[
"%%time\nprobs = model.predict(test_dataset)",
"_____no_output_____"
],
[
"sub.loc[:, 'leaf_rust':] = probs\nsub.to_csv('submission.csv', index=False)\nsub.head()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79c333db53d2e21dda6139bdffa7d52718ae83d | 7,113 | ipynb | Jupyter Notebook | online_judges/prod_three/prod_three_solution.ipynb | sophomore99/PythonInterective | f2ff4d798274218e8543e071141b60c35e86a3eb | [
"Apache-2.0"
] | 8 | 2017-04-16T03:30:36.000Z | 2021-02-04T06:45:30.000Z | online_judges/prod_three/prod_three_solution.ipynb | sophomore99/PythonInterective | f2ff4d798274218e8543e071141b60c35e86a3eb | [
"Apache-2.0"
] | null | null | null | online_judges/prod_three/prod_three_solution.ipynb | sophomore99/PythonInterective | f2ff4d798274218e8543e071141b60c35e86a3eb | [
"Apache-2.0"
] | 7 | 2017-09-18T09:19:02.000Z | 2019-11-22T06:15:50.000Z | 25.403571 | 185 | 0.459581 | [
[
[
"This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).",
"_____no_output_____"
],
[
"# Solution Notebook",
"_____no_output_____"
],
[
"## Problem: Find the highest product of three numbers in a list.\n\n* [Constraints](#Constraints)\n* [Test Cases](#Test-Cases)\n* [Algorithm](#Algorithm)\n* [Code](#Code)\n* [Unit Test](#Unit-Test)",
"_____no_output_____"
],
[
"## Constraints\n\n* Is the input a list of integers?\n * Yes\n* Can we get negative inputs?\n * Yes\n* Can there be duplicate entires in the input?\n * Yes\n* Will there always be at least three integers?\n * No\n* Can we assume the inputs are valid?\n * No, check for None input\n* Can we assume this fits memory?\n * Yes",
"_____no_output_____"
],
[
"## Test Cases\n\n* None -> TypeError\n* Less than three ints -> ValueError\n* [5, -2, 3] -> -30\n* [5, -2, 3, 1, -1, 4] -> 60",
"_____no_output_____"
],
[
"## Algorithm\n\n### Brute force:\n\nUse three loops and multiple each numbers.\n\nComplexity:\n* Time: O(n^3)\n* Space: O(1)\n\n### Sorting:\n\nSort the list, multiply the last three elements.\n\nComplexity:\n* Time: O(n log(n))\n* Space: O(1)\n\n### Greedy:\n\n<pre>\n 0 1 2 3 4 5\n[5, -2, 3, 1, -1, 4] -> 60\n\nmax_prod_of_three = -30\nmax_prod_of_two = -10\nmax_num = 5\nmin_prod_of_two = -10\nmin_num = -2\n\n 0 1 2 3 4 5\n[5, -2, 3, 1, -1, 4] -> 60\n ^\nmax_prod_of_three = -30\nmax_prod_of_two = 15\nmax_num = 5\nmin_prod_of_two = -10\nmin_num = -2\n\n 0 1 2 3 4 5\n[5, -2, 3, 1, -1, 4] -> 60\n ^\nmax_prod_of_three = 15\nmax_prod_of_two = 15\nmax_num = 5\nmin_prod_of_two = -10\nmin_num = -2\n\n 0 1 2 3 4 5\n[5, -2, 3, 1, -1, 4] -> 60\n ^\nmax_prod_of_three = 15\nmax_prod_of_two = 15\nmax_num = 5\nmin_prod_of_two = -10\nmin_num = -2\n\n 0 1 2 3 4 5\n[5, -2, 3, 1, -1, 4] -> 60\n ^\nmax_prod_of_three = 60\nmax_prod_of_two = 15\nmax_num = 5\nmin_prod_of_two = -10\nmin_num = -2\n</pre>\n\nComplexity:\n* Time: O(n)\n* Space: O(1)",
"_____no_output_____"
],
[
"## Code",
"_____no_output_____"
]
],
[
[
"class Solution(object):\n\n def max_prod_three_nlogn(self, array):\n if array is None:\n raise TypeError('array cannot be None')\n if len(array) < 3:\n raise ValueError('array must have 3 or more ints')\n array.sort()\n product = 1\n for item in array[-3:]:\n product *= item\n return product\n\n def max_prod_three(self, array):\n if array is None:\n raise TypeError('array cannot be None')\n if len(array) < 3:\n raise ValueError('array must have 3 or more ints')\n curr_max_prod_three = array[0] * array[1] * array[2]\n max_prod_two = array[0] * array[1]\n min_prod_two = array[0] * array[1]\n max_num = max(array[0], array[1])\n min_num = min(array[0], array[1])\n for i in range(2, len(array)):\n curr_max_prod_three = max(curr_max_prod_three,\n max_prod_two * array[i],\n min_prod_two * array[i])\n max_prod_two = max(max_prod_two,\n max_num * array[i],\n min_num * array[i])\n min_prod_two = min(min_prod_two,\n max_num * array[i],\n min_num * array[i])\n max_num = max(max_num, array[i])\n min_num = min(min_num, array[i])\n return curr_max_prod_three",
"_____no_output_____"
]
],
[
[
"## Unit Test",
"_____no_output_____"
]
],
[
[
"%%writefile test_prod_three.py\nfrom nose.tools import assert_equal, assert_raises\n\n\nclass TestProdThree(object):\n\n def test_prod_three(self):\n solution = Solution()\n assert_raises(TypeError, solution.max_prod_three, None)\n assert_raises(ValueError, solution.max_prod_three, [1, 2])\n assert_equal(solution.max_prod_three([5, -2, 3]), -30)\n assert_equal(solution.max_prod_three([5, -2, 3, 1, -1, 4]), 60)\n print('Success: test_prod_three')\n\n\ndef main():\n test = TestProdThree()\n test.test_prod_three()\n\n\nif __name__ == '__main__':\n main()",
"Overwriting test_prod_three.py\n"
],
[
"%run -i test_prod_three.py",
"Success: test_prod_three\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79c3573bed10400c81adc1e6099e9989cc82d24 | 6,149 | ipynb | Jupyter Notebook | 4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb | impastasyndrome/Lambda-Resource-Static-Assets | 7070672038620d29844991250f2476d0f1a60b0a | [
"MIT"
] | 8 | 2020-09-02T03:59:02.000Z | 2022-01-08T23:36:19.000Z | 4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb | impastasyndrome/Lambda-Resource-Static-Assets | 7070672038620d29844991250f2476d0f1a60b0a | [
"MIT"
] | null | null | null | 4-assets/BOOKS/Jupyter-Notebooks/06-Unzipping-and-Zipping-Files-checkpoint.ipynb | impastasyndrome/Lambda-Resource-Static-Assets | 7070672038620d29844991250f2476d0f1a60b0a | [
"MIT"
] | 3 | 2020-11-18T12:13:05.000Z | 2021-02-24T19:31:50.000Z | 22.36 | 265 | 0.555212 | [
[
[
"# Unzipping and Zipping Files\n\nAs you are probably aware, files can be compressed to a zip format. Often people use special programs on their computer to unzip these files, luckily for us, Python can do the same task with just a few simple lines of code.",
"_____no_output_____"
],
[
"## Create Files to Compress",
"_____no_output_____"
]
],
[
[
"# slashes may need to change for MacOS or Linux\nf = open(\"new_file.txt\",'w+')\nf.write(\"Here is some text\")\nf.close()",
"_____no_output_____"
],
[
"# slashes may need to change for MacOS or Linux\nf = open(\"new_file2.txt\",'w+')\nf.write(\"Here is some text\")\nf.close()",
"_____no_output_____"
]
],
[
[
"## Zipping Files\n\nThe [zipfile library](https://docs.python.org/3/library/zipfile.html) is built in to Python, we can use it to compress folders or files. To compress all files in a folder, just use the os.walk() method to iterate this process for all the files in a directory.",
"_____no_output_____"
]
],
[
[
"import zipfile",
"_____no_output_____"
]
],
[
[
" Create Zip file first , then write to it (the write step compresses the files.)",
"_____no_output_____"
]
],
[
[
"comp_file = zipfile.ZipFile('comp_file.zip','w')",
"_____no_output_____"
],
[
"comp_file.write(\"new_file.txt\",compress_type=zipfile.ZIP_DEFLATED)",
"_____no_output_____"
],
[
"comp_file.write('new_file2.txt',compress_type=zipfile.ZIP_DEFLATED)",
"_____no_output_____"
],
[
"comp_file.close()",
"_____no_output_____"
]
],
[
[
"## Extracting from Zip Files\n\nWe can easily extract files with either the extractall() method to get all the files, or just using the extract() method to only grab individual files.",
"_____no_output_____"
]
],
[
[
"zip_obj = zipfile.ZipFile('comp_file.zip','r')",
"_____no_output_____"
],
[
"zip_obj.extractall(\"extracted_content\")",
"_____no_output_____"
]
],
[
[
"________",
"_____no_output_____"
],
[
"# Using shutil library\n\nOften you don't want to extract or archive individual files from a .zip, but instead archive everything at once. The shutil library that is built in to python has easy to use commands for this:",
"_____no_output_____"
]
],
[
[
"import shutil",
"_____no_output_____"
]
],
[
[
"The shutil library can accept a format parameter, `format` is the archive format: one of \"zip\", \"tar\", \"gztar\", \"bztar\",\nor \"xztar\".",
"_____no_output_____"
]
],
[
[
"pwd",
"_____no_output_____"
],
[
"directory_to_zip='C:\\\\Users\\\\Marcial\\\\Pierian-Data-Courses\\\\Complete-Python-3-Bootcamp\\\\12-Advanced Python Modules'",
"_____no_output_____"
],
[
"# Creating a zip archive\noutput_filename = 'example'\n# Just fill in the output_filename and the directory to zip\n# Note this won't run as is because the variable are undefined\nshutil.make_archive(output_filename,'zip',directory_to_zip)",
"_____no_output_____"
],
[
"# Extracting a zip archive\n# Notice how the parameter/argument order is slightly different here\nshutil.unpack_archive(output_filename,dir_for_extract_result,'zip')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79c36b537bf356dce9651e980a84003d6964a09 | 150,530 | ipynb | Jupyter Notebook | 3.1 - Interventions.ipynb | m5l14i11/Causality | 33b535b7cea6d232d8f6afa0ffad9a832dd4f3a7 | [
"MIT"
] | 353 | 2020-02-03T19:34:16.000Z | 2022-03-29T09:53:38.000Z | 3.1 - Interventions.ipynb | m5l14i11/Causality | 33b535b7cea6d232d8f6afa0ffad9a832dd4f3a7 | [
"MIT"
] | null | null | null | 3.1 - Interventions.ipynb | m5l14i11/Causality | 33b535b7cea6d232d8f6afa0ffad9a832dd4f3a7 | [
"MIT"
] | 78 | 2020-02-08T20:10:01.000Z | 2022-03-29T09:53:29.000Z | 414.683196 | 40,664 | 0.941659 | [
[
[
"<div style=\"width: 100%; overflow: hidden;\">\n <div style=\"width: 150px; float: left;\"> <img src=\"https://raw.githubusercontent.com/DataForScience/Networks/master/data/D4Sci_logo_ball.png\" alt=\"Data For Science, Inc\" align=\"left\" border=\"0\" width=150px> </div>\n <div style=\"float: left; margin-left: 10px;\"> <h1>Causal Inference In Statistics - A Primer</h1>\n <h1>3.1 Interventions</h1>\n <p>Bruno Gonçalves<br/>\n <a href=\"http://www.data4sci.com/\">www.data4sci.com</a><br/>\n @bgoncalves, @data4sci</p></div>\n <div style=\"float: right; margin-right:10px;\"> <p><a href=\"https://amzn.to/3gsFlkO\" target=_blank><img src='data/causality.jpeg' width='100px'>\n <!--Amazon Affiliate Link--></a></p></div>\n</div>",
"_____no_output_____"
]
],
[
[
"from collections import Counter\nfrom pprint import pprint\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt \n\nfrom CausalModel import CausalModel\n\nimport watermark\n\n%load_ext watermark\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"We start by print out the versions of the libraries we're using for future reference",
"_____no_output_____"
]
],
[
[
"%watermark -n -v -m -g -iv",
"watermark 2.0.2\njson 2.0.9\npandas 1.0.1\nmatplotlib 3.1.3\nnumpy 1.18.1\nautopep8 1.5\nSun Oct 18 2020 \n\nCPython 3.7.3\nIPython 6.2.1\n\ncompiler : Clang 4.0.1 (tags/RELEASE_401/final)\nsystem : Darwin\nrelease : 19.6.0\nmachine : x86_64\nprocessor : i386\nCPU cores : 8\ninterpreter: 64bit\nGit hash : 96fdced5915a840d6140e161f1d2827cf29f6e31\n"
]
],
[
[
"Load default figure style",
"_____no_output_____"
]
],
[
[
"plt.style.use('./d4sci.mplstyle')\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']",
"_____no_output_____"
]
],
[
[
"## Graph Surgery",
"_____no_output_____"
]
],
[
[
"G = CausalModel()\nG.add_causation('Ux', 'X')\nG.add_causation('Uy', 'Y')\nG.add_causation('Uz', 'Z')\nG.add_causation('Z', 'X')\nG.add_causation('Z', 'Y')\n\nG.pos = {'Z': (0, 1), 'X': (-1, 0), 'Y':(1, 0), 'Uz':(0, 2), 'Ux':(-1, 1), 'Uy': (1,1)}",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, figsize=(3, 2.5))\nG.plot(ax=ax)",
"_____no_output_____"
],
[
"G.save_model('dags/Primer.Fig.3.1.dot')",
"_____no_output_____"
],
[
"G2 = G.copy()\nG2.dag.remove_edges_from(list(G.dag.in_edges('X')))\nG2.dag.remove_node('Ux')\ndel G2.pos['Ux']",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, figsize=(3.2, 2.5))\nG2.plot(ax=ax)",
"_____no_output_____"
],
[
"G2.save_model('dags/Primer.Fig.3.2.dot')",
"_____no_output_____"
],
[
"G = CausalModel()\nG.add_causation('Ux', 'X')\nG.add_causation('Uy', 'Y')\nG.add_causation('Uz', 'Z')\nG.add_causation('Z', 'X')\nG.add_causation('Z', 'Y')\nG.add_causation('X', 'Y')\n\nG.pos = {'Z': (0, 1), 'X': (-1, 0), 'Y':(1, 0), 'Uz':(0, 2), 'Ux':(-1, 1), 'Uy': (1,1)}",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, figsize=(3, 2.5))\nG.plot(ax=ax)",
"_____no_output_____"
],
[
"G.save_model('dags/Primer.Fig.3.3.dot')",
"_____no_output_____"
],
[
"G2 = G.intervention_graph('X', drop_nodes=True)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, figsize=(3, 2.5))\nG2.plot(ax=ax)",
"_____no_output_____"
],
[
"G.save_model('dags/Primer.Fig.3.4.dot')",
"_____no_output_____"
]
],
[
[
"<div style=\"width: 100%; overflow: hidden;\">\n <img src=\"data/D4Sci_logo_full.png\" alt=\"Data For Science, Inc\" align=\"center\" border=\"0\" width=300px> \n</div>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79c42e18d7077305e261de99da355791084bdb9 | 21,789 | ipynb | Jupyter Notebook | 03_Grouping/Alcohol_Consumption/Exercise.ipynb | coderhh/pandas_exercises | b4f10151d30a0d765d130b41a2d8050a09538c76 | [
"BSD-3-Clause"
] | null | null | null | 03_Grouping/Alcohol_Consumption/Exercise.ipynb | coderhh/pandas_exercises | b4f10151d30a0d765d130b41a2d8050a09538c76 | [
"BSD-3-Clause"
] | null | null | null | 03_Grouping/Alcohol_Consumption/Exercise.ipynb | coderhh/pandas_exercises | b4f10151d30a0d765d130b41a2d8050a09538c76 | [
"BSD-3-Clause"
] | null | null | null | 29.404858 | 133 | 0.321492 | [
[
[
"# Ex - GroupBy",
"_____no_output_____"
],
[
"### Introduction:\n\nGroupBy can be summarized as Split-Apply-Combine.\n\nSpecial thanks to: https://github.com/justmarkham for sharing the dataset and materials.\n\nCheck out this [Diagram](http://i.imgur.com/yjNkiwL.png) \n### Step 1. Import the necessary libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
]
],
[
[
"### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv). ",
"_____no_output_____"
],
[
"### Step 3. Assign it to a variable called drinks.",
"_____no_output_____"
]
],
[
[
"url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/drinks.csv'\ndrinks = pd.read_csv(url)\ndrinks",
"_____no_output_____"
]
],
[
[
"### Step 4. Which continent drinks more beer on average?",
"_____no_output_____"
]
],
[
[
"drinks.groupby('continent').beer_servings.mean()",
"_____no_output_____"
]
],
[
[
"### Step 5. For each continent print the statistics for wine consumption.",
"_____no_output_____"
]
],
[
[
"drinks.groupby('continent').wine_servings.describe()",
"_____no_output_____"
]
],
[
[
"### Step 6. Print the mean alcohol consumption per continent for every column",
"_____no_output_____"
]
],
[
[
"drinks.groupby('continent').mean()",
"_____no_output_____"
]
],
[
[
"### Step 7. Print the median alcohol consumption per continent for every column",
"_____no_output_____"
]
],
[
[
"drinks.groupby('continent').median()",
"_____no_output_____"
]
],
[
[
"### Step 8. Print the mean, min and max values for spirit consumption.\n#### This time output a DataFrame",
"_____no_output_____"
]
],
[
[
"drinks.groupby('continent').spirit_servings.agg(['mean', 'min', 'max'])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79c513740d3a4a1c47eb5dec6d16803da9b7265 | 254,889 | ipynb | Jupyter Notebook | _notebooks/2020-12-23-week3-day1.ipynb | ytu-cvlab/Resource-Blog | d794581ef2efadf56a96fd71e487292ec31179b9 | [
"Apache-2.0"
] | 3 | 2021-06-14T02:49:16.000Z | 2021-08-13T06:38:12.000Z | _notebooks/2020-12-23-week3-day1.ipynb | ytu-cvlab/Resource-Blog | d794581ef2efadf56a96fd71e487292ec31179b9 | [
"Apache-2.0"
] | null | null | null | _notebooks/2020-12-23-week3-day1.ipynb | ytu-cvlab/Resource-Blog | d794581ef2efadf56a96fd71e487292ec31179b9 | [
"Apache-2.0"
] | null | null | null | 130.444729 | 88,764 | 0.846655 | [
[
[
"# Week 3, Day 1 (Dataset Preparation and Arrangement)\n> Welcome to first day (Week 3) of the McE-51069 course.\n- sticky_rank: 7\n- toc: true\n- badges: false\n- comments: false\n- categories: [deep_learning, computer_vision]",
"_____no_output_____"
],
[
"You can download resources for today from this [link](https://github.com/ytu-cvlab/mce-51069-week3-day1/archive/main.zip). We have also posted a [guide video](https://www.youtube.com/watch?v=hIaURCPvCf4) on downloading and accessing materials on [youtube channel](https://www.youtube.com/channel/UCDFhKEbfpxKXVk4Mryh7yhA).",
"_____no_output_____"
],
[
"<a id=data></a>\n# Datasets",
"_____no_output_____"
],
[
"Datasets comes in different forms from various sources. So the question here is what exactly is a dataset and how do we handle datasets for machine learning? To experiment the conditions, we must first know how to manipulate a dataset.",
"_____no_output_____"
],
[
"<a id=pandas></a>\n## Brief Introduction to Pandas ",
"_____no_output_____"
],
[
"Pandas is a python library for data manipulation and analysis. In this section, we will feature a brief introuction to pandas.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport math\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"Pandas stores data in dataframe objects. We can assign columns to each to numpy array (or) list to create a dataframe.",
"_____no_output_____"
]
],
[
[
"#Create a dataframe \n\nnames = ['Jack','Jean','Jennifer','Jimmy']\nages = np.array([23,22,24,21])\n# print(type(names))\n# print(type(ages))\n\ndf = pd.DataFrame({'name': names,\n 'age': ages,\n 'city': ['London', 'Berlin', 'New York', 'Sydney']},index=None)\n\ndf.head()\n# df.style.hide_index()",
"_____no_output_____"
]
],
[
[
"Now, let's see some handy dataframe tricks.",
"_____no_output_____"
]
],
[
[
"df[['name','city']]",
"_____no_output_____"
],
[
"df.info()\n# print(df.columns)\n# print(df.age)",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 4 entries, 0 to 3\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 4 non-null object\n 1 age 4 non-null int32 \n 2 city 4 non-null object\ndtypes: int32(1), object(2)\nmemory usage: 208.0+ bytes\n"
]
],
[
[
"Now that we know how to create a dataframe, we can save the dataframe we created.",
"_____no_output_____"
]
],
[
[
"df.to_csv('Ages_and_cities.csv',index=False,header=True)",
"_____no_output_____"
],
[
"df = pd.read_csv('Ages_and_cities.csv')\ndf.head()",
"_____no_output_____"
]
],
[
[
"<a id=Uyd></a>\n## Understanding your dataset",
"_____no_output_____"
],
[
"In this section, we used [Iris flowers dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set), which contains petal and sepal measurements of three species of Iris flowers.",
"_____no_output_____"
],
[
"#### Three species of Iris flowers from the dataset",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#### Sepal vs Petal",
"_____no_output_____"
],
[
"This dataset was introduced by biologist Ronald Fisher in his 1936 paper. The following figure explains the way length and width are mesured or petal and speal of each flower. ",
"_____no_output_____"
],
[
"\n\n[Image source](https://www.oreilly.com/library/view/python-artificial-intelligence/9781789539462/assets/462dc4fa-fd62-4539-8599-ac80a441382c.png)",
"_____no_output_____"
],
[
"When we observe the dataset, we will discover that the dataset has four features and three unique labels for three flowers.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('iris_data.csv')\ndf.head()\n# df.head(3)",
"_____no_output_____"
],
[
"df.tail()",
"_____no_output_____"
]
],
[
[
"<a id=slice></a>\n## Slicing data",
"_____no_output_____"
],
[
"Now that we understand our dataset, let's prepare to seperate our data based on labels for unique visualization.",
"_____no_output_____"
]
],
[
[
"# df.loc[:3]\ndf.loc[80:85,(\"sepal_length\",\"variety\")]",
"_____no_output_____"
],
[
"# df.iloc[146:]\n# df.iloc[80:85,2:5]\ndf.iloc[80:85,[0,4]]",
"_____no_output_____"
],
[
"Se= df.loc[df.variety =='Setosa', :]\nVc= df.loc[df.variety =='Versicolor', :]\nVi= df.loc[df.variety =='Virginica', :]\nVi.head()",
"_____no_output_____"
]
],
[
[
"<a id=featureviz></a>\n## Feature visualization",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('iris_data.csv')\n# df.dtypes",
"_____no_output_____"
]
],
[
[
"First, we will visualize each measurement with histograms to observe the output distribution for each class.",
"_____no_output_____"
]
],
[
[
"# df.hist(\"sepal.length\",bins=15,edgecolor='black')\n\nplt.figure(figsize=(15,15))\n\nplt.subplot(2, 2, 1)\nplt.hist(Se.sepal_length,bins=15,color=\"steelblue\",edgecolor='black',alpha =0.4, label=\"Setosa\")\nplt.hist(Vc.sepal_length,bins=15,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.sepal_length,bins=15,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.title(\"sepal length distribution\"), plt.xlabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 2)\nplt.hist(Se.sepal_width,bins=15,color=\"steelblue\",edgecolor='black',alpha =0.4, label=\"Setosa\")\nplt.hist(Vc.sepal_width,bins=15,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.sepal_width,bins=15,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.title(\"sepal width distribution\"), plt.xlabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 3)\nplt.hist(Se.petal_length,bins=10,color=\"steelblue\",edgecolor='black',alpha =0.4, label=\"Setosa\")\nplt.hist(Vc.petal_length,bins=10,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.petal_length,bins=10,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.title(\"petal length distribution\"), plt.xlabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 4)\nplt.hist(Se.petal_width,bins=10,color=\"steelblue\",edgecolor='black',alpha =0.4, label=\"Setosa\")\nplt.hist(Vc.petal_width,bins=10,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.petal_width,bins=10,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.title(\"petal width distribution\"), plt.xlabel('cm')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"Now, we will visualize multiple features with scatter plots to gain some more insights.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(15,15))\n\narea = np.pi*20\n\nplt.subplot(2, 2, 1)\nplt.scatter(Se.sepal_length,Se.sepal_width, s=area, c=\"steelblue\", alpha=0.6, label=\"Setosa\")\nplt.scatter(Vc.sepal_length,Vc.sepal_width, s=area, c=\"red\", alpha=0.6, label=\"Versicolor\")\nplt.scatter(Vi.sepal_length,Vi.sepal_width, s=area, c=\"blue\", alpha=0.5, label=\"Virginica\")\nplt.title(\"sepal length Vs sepal width\"), plt.xlabel('cm'), plt.ylabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 2)\nplt.scatter(Se.petal_length,Se.petal_width, s=area, c=\"steelblue\", alpha=0.6, label=\"Setosa\")\nplt.scatter(Vc.petal_length,Vc.petal_width, s=area, c=\"red\", alpha=0.6, label=\"Versicolor\")\nplt.scatter(Vi.petal_length,Vi.petal_width, s=area, c=\"blue\", alpha=0.5, label=\"Virginica\")\nplt.title(\"petal length Vs petal width\"), plt.xlabel('cm'), plt.ylabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 3)\nplt.scatter(Se.sepal_length,Se.petal_length, s=area, c=\"steelblue\", alpha=0.6, label=\"Setosa\")\nplt.scatter(Vc.sepal_length,Vc.petal_length, s=area, c=\"red\", alpha=0.6, label=\"Versicolor\")\nplt.scatter(Vi.sepal_length,Vi.petal_length, s=area, c=\"blue\", alpha=0.5, label=\"Virginica\")\nplt.title(\"sepal length Vs petal length\"), plt.xlabel('cm'), plt.ylabel('cm')\nplt.legend()\n\nplt.subplot(2, 2, 4)\nplt.scatter(Se.sepal_width,Se.petal_width, s=area, c=\"steelblue\", alpha=0.6, label=\"Setosa\")\nplt.scatter(Vc.sepal_width,Vc.petal_width, s=area, c=\"red\", alpha=0.6, label=\"Versicolor\")\nplt.scatter(Vi.sepal_width,Vi.petal_width, s=area, c=\"blue\", alpha=0.5, label=\"Virginica\")\nplt.title(\"sepal width Vs petal width\"), plt.xlabel('cm'), plt.ylabel('cm')\nplt.legend()\n",
"_____no_output_____"
]
],
[
[
"We can definitely see some blobs forming from these visualizations. \"Setosa\" class unsally stands out from the other two classes but the sepal width vs sepal length plot shows \"versicolor\" and \"virginica\" classes will more challenging to classify compared to \"setosa\" class.",
"_____no_output_____"
],
[
"<a id=train></a>\n## Training the model",
"_____no_output_____"
],
[
"[Scikit-learn](https://scikit-learn.org/stable/) is a free machine learning library for Python which features various classification, regression and clustering algorithms.\n\n[Seaborn](https://seaborn.pydata.org/) is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nimport seaborn as sns",
"_____no_output_____"
],
[
"df = pd.read_csv('iris_data.csv')\n# df.dtypes\ndf.tail()",
"_____no_output_____"
],
[
"train_X, test_X, train_y, test_y = train_test_split(df[df.columns[0:4]].values,\n df.variety.values, test_size=0.25)\n\nmodelDT = DecisionTreeClassifier().fit(train_X, train_y)\nDT_predicted = modelDT.predict(test_X)\n\nmodelRF = RandomForestClassifier().fit(train_X, train_y)\nRF_predicted = modelRF.predict(test_X)",
"_____no_output_____"
]
],
[
[
"<a id=evaluate></a>\n## Model Evaluation",
"_____no_output_____"
],
[
"#### Decision Tree classifier",
"_____no_output_____"
]
],
[
[
"print(metrics.classification_report(DT_predicted, test_y))",
" precision recall f1-score support\n\n Setosa 1.00 1.00 1.00 14\n Versicolor 0.78 0.88 0.82 8\n Virginica 0.93 0.88 0.90 16\n\n accuracy 0.92 38\n macro avg 0.90 0.92 0.91 38\nweighted avg 0.93 0.92 0.92 38\n\n"
],
[
"mat = metrics.confusion_matrix(test_y, DT_predicted)\nsns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)\nplt.xlabel('true label')\nplt.ylabel('predicted label');",
"_____no_output_____"
]
],
[
[
"#### Ramdom Forest Classifier",
"_____no_output_____"
]
],
[
[
"print(metrics.classification_report(RF_predicted, test_y))",
" precision recall f1-score support\n\n Setosa 1.00 1.00 1.00 14\n Versicolor 0.78 0.88 0.82 8\n Virginica 0.93 0.88 0.90 16\n\n accuracy 0.92 38\n macro avg 0.90 0.92 0.91 38\nweighted avg 0.93 0.92 0.92 38\n\n"
],
[
"from sklearn.metrics import confusion_matrix\nimport seaborn as sns\n\nmat = confusion_matrix(test_y, RF_predicted)\nsns.heatmap(mat.T, square=True, annot=True,fmt='d', cbar=False)\nplt.xlabel('true label')\nplt.ylabel('predicted label');",
"_____no_output_____"
]
],
[
[
"[colab notebook](https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.08-Random-Forests.ipynb)",
"_____no_output_____"
],
[
"<a id=engineer></a>\n## Feature Engineering",
"_____no_output_____"
],
[
"When generating new features, the product between two features is usually not recommended to engineer unless it makes a magnification of the situation. Here, we use two new features, petal hypotenuse and petal product.",
"_____no_output_____"
]
],
[
[
"#Generate new features\ndf = pd.read_csv('iris_data.csv')\ndf['petal_hypotenuse'] = np.sqrt(df[\"petal_length\"]**2+df[\"petal_width\"]**2)\ndf['petal_product']=df[\"petal_length\"]*df[\"petal_width\"]\n\ndf.tail()",
"_____no_output_____"
],
[
"Se= df.loc[df.variety =='Setosa', :]\nVc= df.loc[df.variety =='Versicolor', :]\nVi= df.loc[df.variety =='Virginica', :]\n\nplt.figure(figsize=(16,8))\n\nplt.subplot(1, 2, 1)\nplt.hist(Se.petal_hypotenuse,bins=10,color=\"steelblue\",edgecolor='black',alpha =0.4 , label=\"Setosa\")\nplt.hist(Vc.petal_hypotenuse,bins=10,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.petal_hypotenuse,bins=10,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.legend()\nplt.title(\"petal hypotenuse distribution\"), plt.xlabel('cm')\n\nplt.subplot(1, 2, 2)\nplt.hist(Se.petal_product,bins=10,color=\"steelblue\",edgecolor='black',alpha =0.4, label=\"Setosa\")\nplt.hist(Vc.petal_product,bins=10,color='red',edgecolor='black', alpha =0.3, label=\"Versicolor\")\nplt.hist(Vi.petal_product,bins=10,color='blue',edgecolor='black', alpha =0.3, label=\"Virginica\")\nplt.legend()\nplt.title(\"petal product distribution\"), plt.xlabel('cm')",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,10))\n\narea = np.pi*20\n\nplt.scatter(Se.petal_hypotenuse,Se.petal_product, s=area, c=\"steelblue\", alpha=0.6, label=\"Setosa\")\nplt.scatter(Vc.petal_hypotenuse,Vc.petal_product, s=area, c=\"red\", alpha=0.6, label=\"Versicolor\")\nplt.scatter(Vi.petal_hypotenuse,Vi.petal_product, s=area, c=\"blue\", alpha=0.5, label=\"Virginica\")\nplt.title(\"petal hypotenuse Vs petal product\"), plt.xlabel('cm'), plt.ylabel('cm^2')\nplt.legend()",
"_____no_output_____"
]
],
[
[
"\n### Train with Engineered features",
"_____no_output_____"
],
[
"Now, let's replace two petal features with two new features we generated. ",
"_____no_output_____"
]
],
[
[
"df.head()",
"_____no_output_____"
],
[
"df2 = df.loc[:,[\"sepal_length\",\"sepal_width\",\"petal_hypotenuse\",\"petal_product\",\"variety\"]]\ndf2.dtypes",
"_____no_output_____"
],
[
"train_X, test_X, train_y, test_y = train_test_split(df2[df2.columns[0:4]].values,\n df2.variety.values, test_size=0.25)\n\nfrom sklearn.tree import DecisionTreeClassifier\nmodelDT = DecisionTreeClassifier().fit(train_X, train_y)\nDT_predicted = modelDT.predict(test_X)\n\nfrom sklearn.ensemble import RandomForestClassifier\nmodelRF = RandomForestClassifier().fit(train_X, train_y)\nRF_predicted = modelRF.predict(test_X)",
"_____no_output_____"
],
[
"print(metrics.classification_report(DT_predicted, test_y))\n# print(metrics.classification_report(RF_predicted, test_y))",
" precision recall f1-score support\n\n Setosa 1.00 1.00 1.00 14\n Versicolor 0.90 1.00 0.95 9\n Virginica 1.00 0.93 0.97 15\n\n accuracy 0.97 38\n macro avg 0.97 0.98 0.97 38\nweighted avg 0.98 0.97 0.97 38\n\n"
],
[
"from sklearn.metrics import confusion_matrix\nimport seaborn as sns\n\nmat = confusion_matrix(test_y, DT_predicted)\n# mat = confusion_matrix(test_y, RF_predicted)\nsns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)\nplt.xlabel('true label')\nplt.ylabel('predicted label');",
"_____no_output_____"
]
],
[
[
"### ",
"_____no_output_____"
],
[
"Reference - [Python Data Science Handbook](https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/05.08-Random-Forests.ipynb)",
"_____no_output_____"
],
[
"<a id=annotate></a>\n## Annotations",
"_____no_output_____"
],
[
"#### Image labels",
"_____no_output_____"
],
[
"For classification models, we have a single label for each set of images in the same class. Annotations can be made very easily.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#### Bounding boxes",
"_____no_output_____"
],
[
"We usually use rectangular bounding boxes for object detection. Detection Models like YOLO and Faster-RCNN use this type of annotations. Bounding boxes are ususally represented by either the coordinates (x1,y1) lower left corner or (x2,y2) upper right corner of the box, followed by height and wigth of the bounding box.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"#### Segmentation",
"_____no_output_____"
],
[
"##### Polygonal Segmentation",
"_____no_output_____"
],
[
"Bounding boxes are simple but not ideal for all types of objects as we have to frame every object in a rectangular box. To solve this problem, polygonal segmentation is introduced. With this method, we can annotate the exact features of the objects with polygons. The image below is from one of my projects for segmentation of temples in ASEAN.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"##### Semantic Segmentation",
"_____no_output_____"
],
[
"This technique takes segmentation to the pixel level. A particular class is assigned to every pixel in the image. Semantic segmentation is used mainly in situations where there is a very significant environmental context. It is used, for instance, in self-driving cars and robotics so that the models understand the environment in which they operate.",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"<a id=imagedata></a>\n### Image Datasets",
"_____no_output_____"
],
[
"[COCO dataset](https://cocodataset.org/)\n\n[Google's Open images V6](https://storage.googleapis.com/openimages/web/index.html)\n\n[ImageNet](http://www.image-net.org/)\n\n[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html)\n\n[MNIST](http://yann.lecun.com/exdb/mnist/)",
"_____no_output_____"
],
[
"[PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html)",
"_____no_output_____"
],
[
"<a id=tools></a>\n### Annotation tools",
"_____no_output_____"
],
[
"[Makesense](makesense.ai)\n\n[LabelImg](https://github.com/tzutalin/labelImg)",
"_____no_output_____"
],
[
"### References\n\n[Tensorflow](https://www.tensorflow.org/tutorials/images/classification), [Google images](https://www.google.com/imghp?hl=en)\n, [Sabina Pokhrel's article](https://towardsdatascience.com/image-data-labelling-and-annotation-everything-you-need-to-know-86ede6c684b1)\n, [Cityscapes dataset](https://www.cityscapes-dataset.com/examples/#coarse-annotations)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e79c9a874521d331a42ef57c2a8ae4316b7dcdf0 | 80,706 | ipynb | Jupyter Notebook | notebooks/data_preprocessing.ipynb | SudoHead/movie-classifier | 274bb69db9e8861ac35ce5ff246f4c3a787f8c44 | [
"MIT"
] | null | null | null | notebooks/data_preprocessing.ipynb | SudoHead/movie-classifier | 274bb69db9e8861ac35ce5ff246f4c3a787f8c44 | [
"MIT"
] | null | null | null | notebooks/data_preprocessing.ipynb | SudoHead/movie-classifier | 274bb69db9e8861ac35ce5ff246f4c3a787f8c44 | [
"MIT"
] | null | null | null | 117.305233 | 34,676 | 0.83544 | [
[
[
"# Data Preprocessing\n\nThis notebook shows ",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\nimport json\nimport nltk\nfrom nltk.corpus import wordnet\nimport sklearn\nimport seaborn as sns\nimport unicodedata\nimport inflect\n\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\nnltk.download('stopwords')",
"[nltk_data] Downloading package punkt to /home/jovyan/nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n[nltk_data] Downloading package wordnet to /home/jovyan/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package averaged_perceptron_tagger to\n[nltk_data] /home/jovyan/nltk_data...\n[nltk_data] Package averaged_perceptron_tagger is already up-to-\n[nltk_data] date!\n[nltk_data] Downloading package stopwords to /home/jovyan/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n"
]
],
[
[
"Load the data:",
"_____no_output_____"
]
],
[
[
"path = '../data/movies_metadata.csv'\n\ndf = pd.read_csv(path)",
"_____no_output_____"
],
[
"df = pd.concat([df['release_date'], df['title'], df['overview'], df['genres']], axis=1)\n\n# remove duplicates\nduplicate_rows = df[df.duplicated()]\ndf.drop(duplicate_rows.index, inplace=True)",
"_____no_output_____"
]
],
[
[
"#### Drop the NaN rows where either title or overview is NaN",
"_____no_output_____"
]
],
[
[
"# convert empty string to NaN\ndf['overview'].replace('', np.nan, inplace=True)\ndf.dropna(subset=['release_date', 'title', 'overview'], inplace=True)\n\n# the release date is no longer necessary, because NaN are cleared\ndel df['release_date']",
"_____no_output_____"
]
],
[
[
"#### Drop rows with no overview info or blank",
"_____no_output_____"
]
],
[
[
"reg_404 = \"^not available|^no overview\"\noverview_not_found = df['overview'].str.contains(reg_404, regex=True, flags=re.IGNORECASE)\noverview_blank = df['overview'].str.isspace()\n\ndf.drop(df[overview_not_found].index, inplace=True)\ndf.drop(df[overview_blank].index, inplace=True)\ndf.head()",
"/opt/conda/lib/python3.7/site-packages/ipykernel_launcher.py:6: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n \n"
]
],
[
[
"#### Transform column genre",
"_____no_output_____"
]
],
[
[
"def extract_genres(genres_str):\n genres_str = genres_str.replace(\"'\", '\\\"')\n genres_json = json.loads(genres_str)\n genres_list = []\n for elem in genres_json:\n genres_list.append(elem['name'])\n return genres_list",
"_____no_output_____"
],
[
"# remove rows with no genres, since they don't provide any information\ndf.drop(df[df['genres'] == '[]'].index, inplace=True)\n\n# transform genres from string to list\ntemp_genre = df['genres'].apply(extract_genres)",
"_____no_output_____"
],
[
"# test conversion to list went ok\ng_set = set()\nfor i, row in df['genres'].iteritems():\n reg = ''\n for genre in temp_genre[i]:\n reg = reg + '(?=.*' + genre + ')'\n g_set.add(genre)\n if not re.search(reg, row) or len(temp_genre[i]) == 0:\n print('FAILED: at i =', i , row)\n print(reg)\n break",
"_____no_output_____"
],
[
"df['genres'] = temp_genre",
"_____no_output_____"
]
],
[
[
"#### Visualise movie genres' distribution",
"_____no_output_____"
]
],
[
[
"all_genres = sum(df['genres'], [])\ngenre_types = set(all_genres)\nlen(genre_types)",
"_____no_output_____"
],
[
"all_genres = nltk.FreqDist(all_genres) \n\n# create dataframe\nall_genres_df = pd.DataFrame({'Genre': list(all_genres.keys()), \n 'Count': list(all_genres.values())})\n\ng = all_genres_df.nlargest(columns=\"Count\", n = 50) \nplt.figure(figsize=(12,15)) \nax = sns.barplot(data=g, x= \"Count\", y = \"Genre\") \nplt.show()",
"_____no_output_____"
]
],
[
[
"## Text Preprocessing",
"_____no_output_____"
]
],
[
[
"def to_lower(text):\n return text.lower()\n\ndef remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence\n\ndef remove_stopwords(tokens):\n words = []\n for word in tokens:\n if word not in nltk.corpus.stopwords.words('english'):\n words.append(word)\n return words\n\ndef replace_nums2words(tokens):\n e = inflect.engine()\n words = []\n for word in tokens:\n if word.isdigit():\n words.append(e.number_to_words(word).replace(',', ''))\n else:\n words.append(word)\n return words\n\ndef lemmatisation(tokens):\n pos_tag = nltk.pos_tag(tokens)\n lemmatiser = nltk.WordNetLemmatizer()\n wornet_tags = {\"J\": wordnet.ADJ, \"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"R\": wordnet.ADV}\n words = []\n for word, tag in pos_tag:\n proper_tag = wornet_tags.get(tag[0].upper(), wordnet.NOUN)\n words.append(lemmatiser.lemmatize(word, proper_tag))\n return words\n\ndef text_preprocessing(text):\n # 1. Transform all characters in lowercase\n text = to_lower(text)\n\n # 2. Replace all compatibility characters with their equivalents (i.e. accented)\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')\n\n # 3. Remove special characters (punctuation, extra spaces)\n text = remove_specials(text)\n\n # 4. Tokenization\n toks = nltk.word_tokenize(text)\n\n # 5. Stopwords removal\n toks = remove_stopwords(toks)\n\n # 5. Convert to number to text representation\n toks = replace_nums2words(toks)\n \n # 6. Lemmatisation\n# toks = lemmatisation(toks)\n\n return toks",
"_____no_output_____"
],
[
"df['overview'] = df['overview'].apply(text_preprocessing)",
"_____no_output_____"
],
[
"def flatten_overview_words(column):\n all_words = []\n for overview in column.values.tolist():\n for word in overview:\n all_words.append(word)\n return all_words\n\ndef freq_words(x, terms = 30):\n fdist = nltk.FreqDist(x) \n words_df = pd.DataFrame({'word':list(fdist.keys()), 'count':list(fdist.values())}) \n\n # selecting top 20 most frequent words \n d = words_df.nlargest(columns=\"count\", n = terms) \n\n # visualize words and frequencies\n plt.figure(figsize=(12,15)) \n ax = sns.barplot(data=d, x= \"count\", y = \"word\") \n ax.set(ylabel = 'Word') \n plt.show()\n \n# print 100 most frequent words \nfreq_words(flatten_overview_words(df['overview']), 50)",
"_____no_output_____"
],
[
"new_df = df['title']\nstr_overview = df['overview'].apply(lambda x: ' '.join(x))\nnew_df = pd.concat([new_df, str_overview], axis=1)\nnew_df = pd.concat([new_df, df['genres']], axis=1)\nnew_df['genres'] = new_df['genres'].apply(lambda x: ','.join(x))\nnew_df['overview'] = new_df['title'].apply(to_lower).astype(str) + ' ' + new_df ['overview']\nnew_df",
"_____no_output_____"
]
],
[
[
"Save the processed data:",
"_____no_output_____"
]
],
[
[
"# new_df.to_csv(\"../data/movies_data_ready.csv\", index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79ca12fea796dc5c8912bbeca2ea9b7fd2b7aea | 47,122 | ipynb | Jupyter Notebook | Results/Jupyter/ML/ex1.ipynb | in2dblue/interactive-rl | fd4fba495096e3ca63a6c82ffcfe28dc90a74d65 | [
"MIT"
] | null | null | null | Results/Jupyter/ML/ex1.ipynb | in2dblue/interactive-rl | fd4fba495096e3ca63a6c82ffcfe28dc90a74d65 | [
"MIT"
] | null | null | null | Results/Jupyter/ML/ex1.ipynb | in2dblue/interactive-rl | fd4fba495096e3ca63a6c82ffcfe28dc90a74d65 | [
"MIT"
] | null | null | null | 266.225989 | 42,832 | 0.922329 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"np.random.seed(100)\nx=np.random.random(100)\nepsilon=np.random.uniform(-0.3,0.3,100)\ny=np.sin(2*np.pi*x)+epsilon\n\nalpha=np.arange(0, 0.1, 0.01)\nnp.random.seed(5)\ntheta=np.random.uniform(-0.5,0.5,4)\nh=np.empty_like(y)\nerror=np.empty(5000)\nmin_err=100.0\nh_init=np.empty_like(y)\nh_init=theta[0]*1 + theta[1]*x + theta[2]*np.power(x,2) + theta[3]*np.power(x,3)",
"_____no_output_____"
]
],
[
[
"# Model Training",
"_____no_output_____"
]
],
[
[
"for a in alpha:\n for i in range(5000):\n for j in range(x.size):\n h[j]=theta[0]*1 + theta[1]*x[j] + theta[2]*np.power(x[j],2) + theta[3]*np.power(x[j],3)\n theta=theta+a*(y[j]-h[j])*np.array([1, x[j], np.power(x[j],2), np.power(x[j],3)])\n if a==0.03:\n error[i]=np.sum(np.square(h-y))/2\n err=np.sum(np.square(h-y))\n print('Error with learning rate {} is {} '.format(a, err))\n if err<min_err:\n min_err=err\n best_h=h\n best_theta=theta\n best_alpha=a",
"error in loop 0.0 is 73.56447519724891 \nerror in loop 0.01 is 9.562054469232352 \nerror in loop 0.02 is 3.957172871131731 \nerror in loop 0.03 is 3.2902331120767023 \nerror in loop 0.04 is 3.3135000045194456 \nerror in loop 0.05 is 3.3617673853208876 \nerror in loop 0.06 is 3.409621997732504 \nerror in loop 0.07 is 3.4575060021506236 \nerror in loop 0.08 is 3.505500517252877 \nerror in loop 0.09 is 3.5536050756746214 \n"
],
[
"print('min_err: ',min_err)\nprint('best_theta: ',best_theta)\nprint('best_alpha: ',best_alpha)\nerror",
"min_err: 3.2902331120767023\nbest_theta: [ -0.0647765 10.96884442 -32.59145249 21.84947491]\nbest_alpha: 0.03\n"
],
[
"f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))\n\nax1.plot(x, y, 'o', label='Actual datapoints')\nax1.plot(x, h_init, 'go', label='Initial model prediction')\nax1.plot(x, best_h, 'ro', label='Final model prediction')\nax1.legend()\n\nax2.plot(error)\nax2.set_title('Error curve')\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79ca9981f89f3f91e43c1cc3171d80c0fcfd1f9 | 117,862 | ipynb | Jupyter Notebook | docs/notebooks/divergence/BugDetection.ipynb | FINRAOS/model-validation-toolkit | 57bf478712649e08635b2a6952c5b60ecf189ca5 | [
"Apache-2.0"
] | 27 | 2021-12-13T20:07:30.000Z | 2022-03-25T20:14:10.000Z | docs/notebooks/divergence/BugDetection.ipynb | FINRAOS/model-validation-toolkit | 57bf478712649e08635b2a6952c5b60ecf189ca5 | [
"Apache-2.0"
] | null | null | null | docs/notebooks/divergence/BugDetection.ipynb | FINRAOS/model-validation-toolkit | 57bf478712649e08635b2a6952c5b60ecf189ca5 | [
"Apache-2.0"
] | 2 | 2021-12-10T12:42:05.000Z | 2022-03-21T03:44:08.000Z | 167.417614 | 54,804 | 0.855704 | [
[
[
"# Dataset Bug Detection\n\nIn this example, we will demonstrate how to detect bugs in a data set using the public Airlines data set.",
"_____no_output_____"
]
],
[
[
"# Since we use the category_encoders library to perform binary encoding on some of the features in this demo, \n# we'll need to install it.\n!pip install category_encoders",
"_____no_output_____"
],
[
"import pandas\npandas.options.display.max_rows=5 # restrict to 5 rows on display\n\ndf = pandas.read_csv(\"https://raw.githubusercontent.com/Devvrat53/Flight-Delay-Prediction/master/Data/flight_data.csv\")\ndf['date'] = pandas.to_datetime(df[['year', 'month', 'day']])\ndf['day_index'] = (df['date'] - df['date'].min()).dt.days\ndf['DayOfWeek'] = df['date'].dt.day_name()\ndf['Month'] = df['date'].dt.month_name()\ndf",
"_____no_output_____"
]
],
[
[
"## Prepare daily data\n\nLet's assume that we run new data each day through our model. For simplicity we will just look at the last 10 days of data.",
"_____no_output_____"
]
],
[
[
"df_daily = df[df['month'] > 11]",
"_____no_output_____"
],
[
"df_daily = df_daily[df_daily['day'] > 20]",
"_____no_output_____"
],
[
"df_daily",
"_____no_output_____"
]
],
[
[
"## Bug Detection\n\nNow we want to find any bugs in any of our daily sets of data that we feed to our model.\nNote that we are performing binary encoding on the categorical columns (carrier, origin, and dest) so that we can pass the data to the variational estimation function directly. We are doing this for performance reasons vs. the hybrid estimation, and to strike a balance between plain index encoding and one-hot encoding.",
"_____no_output_____"
]
],
[
[
"import category_encoders as ce\nfrom mvtk.supervisor.utils import compute_divergence_crosstabs\nfrom mvtk.supervisor.divergence import calc_tv_knn\n\ncolumns = ['dep_time', 'sched_dep_time', 'dep_delay', 'arr_time', 'sched_arr_time', 'arr_delay', 'air_time', 'distance', 'hour', 'minute', 'carrier', 'origin', 'dest']\n\nencoder = ce.BinaryEncoder(cols=['carrier', 'origin', 'dest'])\nencoder.fit(df_daily[columns + ['day']])\ndf_daily_encoded = encoder.transform(df_daily[columns + ['day']].fillna(0))\n\nf = lambda x, y: calc_tv_knn(x, y, k = 26)\nresult = compute_divergence_crosstabs(df_daily_encoded, datecol='day', divergence=f)",
"WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
],
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.heatmap(result, cmap='coolwarm', linewidths=0.30, annot=True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"As you can see from the heatmap above, although there are some divergences between the days, there is nothing that is too alarming.\n\nLet's now update our data set to contain a \"bug\" in the \"sched_dep_time\" feature. For day 30, all of the values of that feature are null (which we are then translating to 0).",
"_____no_output_____"
]
],
[
[
"df_daily.loc[df_daily['day'] == 30, ['sched_dep_time']] = None",
"_____no_output_____"
]
],
[
[
"Below is the percentage of scheduled departure times that are empty per day in our updated daily data set",
"_____no_output_____"
]
],
[
[
"day = 21\nfor df_day in df_daily.groupby('day'):\n day_pct = df_day[1]['sched_dep_time'].value_counts(normalize=True, dropna=False) * 100\n pct = day_pct.loc[day_pct.index.isnull()].values\n if (len(pct) == 0):\n pct = 0\n else:\n pct = pct[0]\n print('Day ' + str(day) + ': ' + str(round(pct)) + '%')\n day += 1",
"Day 21: 0%\nDay 22: 0%\nDay 23: 0%\nDay 24: 0%\nDay 25: 0%\nDay 26: 0%\nDay 27: 0%\nDay 28: 0%\nDay 29: 0%\nDay 30: 100%\nDay 31: 0%\n"
],
[
"from mvtk.supervisor.divergence import calc_tv_knn\n\nencoder = ce.BinaryEncoder(cols=['carrier', 'origin', 'dest'])\nencoder.fit(df_daily[columns + ['day']])\ndf_daily_encoded = encoder.transform(df_daily[columns + ['day']].fillna(0))\n\nf = lambda x, y: calc_tv_knn(x, y, k = 26)\nresult = compute_divergence_crosstabs(df_daily_encoded, datecol='day', divergence=f)",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.heatmap(result, cmap='coolwarm', linewidths=0.30, annot=True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"As we can see above, our heatmap now clearly shows that we have a \"bug\" in our day 30 dataset.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79ca9e010271a9bff574398cd5596a74ef0b107 | 2,268 | ipynb | Jupyter Notebook | solutions/world_cup_soln.ipynb | chwebster/ThinkBayes2 | 49af0e36c38c2656d7b91117cfa2b019ead81988 | [
"MIT"
] | 1,337 | 2015-01-06T06:23:55.000Z | 2022-03-31T21:06:21.000Z | solutions/world_cup_soln.ipynb | chwebster/ThinkBayes2 | 49af0e36c38c2656d7b91117cfa2b019ead81988 | [
"MIT"
] | 43 | 2015-04-23T13:14:15.000Z | 2022-01-04T12:55:59.000Z | solutions/world_cup_soln.ipynb | chwebster/ThinkBayes2 | 49af0e36c38c2656d7b91117cfa2b019ead81988 | [
"MIT"
] | 1,497 | 2015-01-13T22:05:32.000Z | 2022-03-30T09:19:53.000Z | 18.590164 | 84 | 0.521164 | [
[
[
"# Think Bayes\n\nThis notebook presents example code and exercise solutions for Think Bayes.\n\nCopyright 2018 Allen B. Downey\n\nMIT License: https://opensource.org/licenses/MIT",
"_____no_output_____"
]
],
[
[
"# Configure Jupyter so figures appear in the notebook\n%matplotlib inline\n\n# Configure Jupyter to display the assigned value after an assignment\n%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'\n\n# import classes from thinkbayes2\nfrom thinkbayes2 import Hist, Pmf, Suite, Beta\nimport thinkplot\n\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"## Section\n\nxxx",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e79caa55742721b95747d5905155d79de0544abc | 29,979 | ipynb | Jupyter Notebook | ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb | ICCTerasmus/ICCT | fcd56ab6b5fddc00f72521cc87accfdbec6068f6 | [
"BSD-3-Clause"
] | 6 | 2021-05-22T18:42:14.000Z | 2021-10-03T14:10:22.000Z | ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb | ICCTerasmus/ICCT | fcd56ab6b5fddc00f72521cc87accfdbec6068f6 | [
"BSD-3-Clause"
] | null | null | null | ICCT_it/examples/04/.ipynb_checkpoints/SS-40-Controllo_di_un_braccio_robotico_con_giunto_flessibile-checkpoint.ipynb | ICCTerasmus/ICCT | fcd56ab6b5fddc00f72521cc87accfdbec6068f6 | [
"BSD-3-Clause"
] | 2 | 2021-05-24T11:40:09.000Z | 2021-08-29T16:36:18.000Z | 37.614806 | 463 | 0.481804 | [
[
[
"#remove cell visibility\nfrom IPython.display import HTML\ntag = HTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide()\n } else {\n $('div.input').show()\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\nToggle cell visibility <a href=\"javascript:code_toggle()\">here</a>.''')\ndisplay(tag)",
"_____no_output_____"
],
[
"%matplotlib inline\nimport control\nimport numpy\nimport sympy as sym\nfrom IPython.display import display, Markdown\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\n\n\n#print a matrix latex-like\ndef bmatrix(a):\n \"\"\"Returns a LaTeX bmatrix - by Damir Arbula (ICCT project)\n\n :a: numpy array\n :returns: LaTeX bmatrix as a string\n \"\"\"\n if len(a.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{bmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{bmatrix}']\n return '\\n'.join(rv)\n\n\n# Display formatted matrix: \ndef vmatrix(a):\n if len(a.shape) > 2:\n raise ValueError('bmatrix can at most display two dimensions')\n lines = str(a).replace('[', '').replace(']', '').splitlines()\n rv = [r'\\begin{vmatrix}']\n rv += [' ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n rv += [r'\\end{vmatrix}']\n return '\\n'.join(rv)\n\n\n#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !\nclass matrixWidget(widgets.VBox):\n def updateM(self,change):\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.M_[irow,icol] = self.children[irow].children[icol].value\n #print(self.M_[irow,icol])\n self.value = self.M_\n\n def dummychangecallback(self,change):\n pass\n \n \n def __init__(self,n,m):\n self.n = n\n self.m = m\n self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))\n self.value = self.M_\n widgets.VBox.__init__(self,\n children = [\n widgets.HBox(children = \n [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]\n ) \n for j in range(n)\n ])\n \n #fill in widgets and tell interact to call updateM each time a children changes value\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].value = self.M_[irow,icol]\n 
self.children[irow].children[icol].observe(self.updateM, names='value')\n #value = Unicode('[email protected]', help=\"The email value.\").tag(sync=True)\n self.observe(self.updateM, names='value', type= 'All')\n \n def setM(self, newM):\n #disable callbacks, change values, and reenable\n self.unobserve(self.updateM, names='value', type= 'All')\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].unobserve(self.updateM, names='value')\n self.M_ = newM\n self.value = self.M_\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].value = self.M_[irow,icol]\n for irow in range(0,self.n):\n for icol in range(0,self.m):\n self.children[irow].children[icol].observe(self.updateM, names='value')\n self.observe(self.updateM, names='value', type= 'All') \n\n #self.children[irow].children[icol].observe(self.updateM, names='value')\n\n \n#overlaod class for state space systems that DO NOT remove \"useless\" states (what \"professor\" of automatic control would do this?)\nclass sss(control.StateSpace):\n def __init__(self,*args):\n #call base class init constructor\n control.StateSpace.__init__(self,*args)\n #disable function below in base class\n def _remove_useless_states(self):\n pass",
"_____no_output_____"
]
],
[
[
"## Controllo di un braccio robotico con giunto flessibile\n\nUn collegamento di un braccio robotico è azionato da un motore elettrico tramite un giunto flessibile che si comporta come una molla torsionale. La dinamica del sistema può essere approssimata con un sistema lineare tempo invariante del terzo ordine in cui gli stati sono:\n- $x_1$: differenza tra gli angoli del motore e del braccio (non è nulla a causa della flessibilità dell'articolazione),\n- $x_2$: velocità angolare dell'albero motore,\n- $x_3$: velocità angolare del link.\nL'input $u$ è la coppia del motore. Le equazioni dinamiche sono:\n\n\\begin{cases}\n\\dot{x} = \\begin{bmatrix} 0 & 1 & -1 \\\\ a-1 & -b_1 & b_1 \\\\ a & b_2 & -b_2 \\end{bmatrix}x + \\begin{bmatrix} 0 \\\\ b_3 \\\\ 0 \\end{bmatrix}u \\\\\ny = \\begin{bmatrix} 0 & 0 & 1 \\end{bmatrix}x\n\\end{cases}\n\ncon $a=0.1$, $b_1=0.09$, $b_2=0.01$ e $b_3=90$.\n\nL'obiettivo del progetto del sistema di controllo è quello di regolare la velocità angolare del collegamento in modo da avere poli dominanti con smorzamento pari a 0,7 e frequenza naturale pari a 0,5 rad/s e errore di regime nullo in risposta ad un gradino di velocità di riferimento.\n\nLa funzione di trasferimento del sistema è:",
"_____no_output_____"
]
],
[
[
"A = numpy.matrix('0 1 -1; -0.9 -0.09 0.09; 0.1 0.01 -0.01')\nB = numpy.matrix('0; 90; 0')\nC = numpy.matrix('0 0 1')\nD = numpy.matrix('0')\nsys_tf = control.tf(sss(A,B,C,D))\nprint(sys_tf)",
"\n 3.886e-16 s^2 + 0.9 s + 9\n-----------------------------\ns^3 + 0.1 s^2 + s - 8.674e-19\n\n"
]
],
[
[
"con poli",
"_____no_output_____"
]
],
[
[
"import warnings\n# In order to suppress the warning BadCoefficient\nwarnings.filterwarnings(\"ignore\")\nprint(numpy.round(sys_tf.pole(),3))",
"[-0.05+0.999j -0.05-0.999j 0. +0.j ]\n"
]
],
[
[
"e zeri",
"_____no_output_____"
]
],
[
[
"print(numpy.round(sys_tf.zero(),3),'.')",
"[-2.31613695e+15 -1.00000000e+01] .\n"
]
],
[
[
"Innanzitutto, si analizza il sistema per verificare se è controllabile e osservabile. La matrice di controllabilità $\\mathcal{C}$ è",
"_____no_output_____"
]
],
[
[
"Ctrb = control.ctrb(A,B)\ndisplay(Markdown(bmatrix(Ctrb)))\n# print(numpy.linalg.matrix_rank(Ctrb))",
"_____no_output_____"
]
],
[
[
"e ha rango pari a 3 quindi il sistema è controllabile. La matrice di osservabilità $\\mathcal{O}$ è",
"_____no_output_____"
]
],
[
[
"Obsv = control.obsv(A,C)\ndisplay(Markdown(bmatrix(Obsv)))\n# print(numpy.linalg.matrix_rank(Obsv))",
"_____no_output_____"
]
],
[
[
"e ha rango pari a 3 quindi il sistema è osservabile.\n\nCiò potrebbe essere effettivamente dedotto dal fatto che il denominatore della funzione di trasferimento è del terzo ordine (uguale alla dimensione del vettore nello spazio degli stati).\n\n### Design del regolatore\n#### Design del controller\n\nDati i requisiti, si sa che si devono posizionare 2 poli in $\\zeta \\omega_n \\pm \\sqrt{1-\\zeta^2}\\omega_n = -0.35\\pm0.357i$ e posizionare il polo reale rimanente a una frequenza superiore a quella dei poli complessi (dominanti). Si può scegliere di posizionare il terzo polo in -3,5 rad/s. Per il requisito dell'errore di regime nullo, si scala il segnale di riferimento con un guadagno uguale all'inverso del guadagno del sistema ad anello chiuso.\n\n#### Design dell'osservatore\n\nPer avere un osservatore che assista rapidamente il controllore semplicemente si posizionano i poli in circa -10 rad/s.\n\n### Come usare questo notebook?\n- Verifica se il sistema a ciclo chiuso funziona bene anche in caso di errore di stima nello stato iniziale. Prova a migliorare le prestazioni.\n- Riduci la frequenza del polo reale del sistema a ciclo chiuso controllato e osserva come la risposta differisce dal riferimento.",
"_____no_output_____"
]
],
[
[
"# Preparatory cell\n\nX0 = numpy.matrix('0.0; 0.0; 0.0')\nK = numpy.matrix([8/15,-4.4,-4])\nL = numpy.matrix([[23],[66],[107/3]])\n\nAw = matrixWidget(3,3)\nAw.setM(A)\nBw = matrixWidget(3,1)\nBw.setM(B)\nCw = matrixWidget(1,3)\nCw.setM(C)\nX0w = matrixWidget(3,1)\nX0w.setM(X0)\nKw = matrixWidget(1,3)\nKw.setM(K)\nLw = matrixWidget(3,1)\nLw.setM(L)\n\n\neig1c = matrixWidget(1,1)\neig2c = matrixWidget(2,1)\neig3c = matrixWidget(1,1)\neig1c.setM(numpy.matrix([-3.5])) \neig2c.setM(numpy.matrix([[-0.35],[-0.357]]))\neig3c.setM(numpy.matrix([-3.5]))\n\neig1o = matrixWidget(1,1)\neig2o = matrixWidget(2,1)\neig3o = matrixWidget(1,1)\neig1o.setM(numpy.matrix([-10.])) \neig2o.setM(numpy.matrix([[-10.],[0.]]))\neig3o.setM(numpy.matrix([-10.]))",
"_____no_output_____"
],
[
"# Misc\n\n#create dummy widget \nDW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))\n\n#create button widget\nSTART = widgets.Button(\n description='Test',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltip='Test',\n icon='check'\n)\n \ndef on_start_button_clicked(b):\n #This is a workaround to have intreactive_output call the callback:\n # force the value of the dummy widget to change\n if DW.value> 0 :\n DW.value = -1\n else: \n DW.value = 1\n pass\nSTART.on_click(on_start_button_clicked)\n\n# Define type of method \nselm = widgets.Dropdown(\n options= [('Imposta K e L','Set K and L'), ('Imposta gli autovalori','Set the eigenvalues')],\n value= 'Set the eigenvalues',\n description='',\n disabled=False\n)\n\n# Define the number of complex eigenvalues\nsele = widgets.Dropdown(\n options= [('0 autovalori complessi','0 complex eigenvalues'), ('2 autovalori complessi','2 complex eigenvalues')],\n value= '2 complex eigenvalues',\n description='Autovalori complessi:',\n style = {'description_width': 'initial'},\n disabled=False\n)\n\n#define type of ipout \nselu = widgets.Dropdown(\n options=[('impulso','impulse'), ('gradino','step'), ('sinusoide','sinusoid'), ('onda quadra','square wave')],\n value='step',\n description='Riferimento:',\n style = {'description_width': 'initial'},\n disabled=False\n)\n# Define the values of the input\nu = widgets.FloatSlider(\n value=1,\n min=0,\n max=3,\n step=0.1,\n description='Riferimento:',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.1f',\n)\nperiod = widgets.FloatSlider(\n value=0.5,\n min=0.001,\n max=10,\n step=0.001,\n description='Periodo: ',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.2f',\n)\n\ngain_w2 = widgets.FloatText(\n value=1.,\n# description='',\n description='Guadagno inverso del riferimento:',\n style = {'description_width': 
'initial'},\n disabled=True\n)\n\nsimTime = widgets.FloatText(\n value=20,\n description='Tempo di simulazione (s):',\n style = {'description_width': 'initial'},\n disabled=False\n)",
"_____no_output_____"
],
[
"# Support functions\n\ndef eigen_choice(sele):\n if sele == '0 complex eigenvalues':\n eig1c.children[0].children[0].disabled = False\n eig2c.children[1].children[0].disabled = True\n eig1o.children[0].children[0].disabled = False\n eig2o.children[1].children[0].disabled = True\n eig = 0\n if sele == '2 complex eigenvalues':\n eig1c.children[0].children[0].disabled = True\n eig2c.children[1].children[0].disabled = False\n eig1o.children[0].children[0].disabled = True\n eig2o.children[1].children[0].disabled = False\n eig = 2\n return eig\n\ndef method_choice(selm):\n if selm == 'Set K and L':\n method = 1\n sele.disabled = True\n if selm == 'Set the eigenvalues':\n method = 2\n sele.disabled = False\n return method",
"_____no_output_____"
],
[
"s = control.tf('s')\nGref = (0.5)**2/(s**2 + 2*0.7*0.5*s + (0.5)**2)\n\ndef main_callback2(Aw, Bw, X0w, K, L, eig1c, eig2c, eig3c, eig1o, eig2o, eig3o, u, period, selm, sele, selu, simTime, DW):\n eige = eigen_choice(sele)\n method = method_choice(selm)\n \n if method == 1:\n solc = numpy.linalg.eig(A-B*K)\n solo = numpy.linalg.eig(A-L*C)\n if method == 2:\n if eige == 0:\n K = control.acker(A, B, [eig1c[0,0], eig2c[0,0], eig3c[0,0]])\n Kw.setM(K)\n L = control.acker(A.T, C.T, [eig1o[0,0], eig2o[0,0], eig3o[0,0]]).T\n Lw.setM(L)\n if eige == 2:\n K = control.acker(A, B, [eig3c[0,0], \n numpy.complex(eig2c[0,0],eig2c[1,0]), \n numpy.complex(eig2c[0,0],-eig2c[1,0])])\n Kw.setM(K)\n L = control.acker(A.T, C.T, [eig3o[0,0], \n numpy.complex(eig2o[0,0],eig2o[1,0]), \n numpy.complex(eig2o[0,0],-eig2o[1,0])]).T\n Lw.setM(L)\n \n \n sys = control.ss(A,B,numpy.vstack((C,numpy.zeros((B.shape[1],C.shape[1])))),numpy.vstack((D,numpy.eye(B.shape[1]))))\n sysC = control.ss(numpy.zeros((1,1)),\n numpy.zeros((1,numpy.shape(A)[0])),\n numpy.zeros((numpy.shape(B)[1],1)),\n -K)\n \n sysE = control.ss(A-L*C,\n numpy.hstack((L,B-L*D)),\n numpy.eye(numpy.shape(A)[0]),\n numpy.zeros((A.shape[0],C.shape[0]+B.shape[1])))\n \n sys_append = control.append(sys, sysE, sysC, control.ss(A,B,numpy.eye(A.shape[0]),numpy.zeros((A.shape[0],B.shape[1]))))\n Q = []\n # y in ingresso a sysE\n for i in range(C.shape[0]):\n Q.append([B.shape[1]+i+1, i+1])\n # u in ingresso a sysE\n for i in range(B.shape[1]):\n Q.append([B.shape[1]+C.shape[0]+i+1, C.shape[0]+i+1])\n # u in ingresso a sys\n for i in range(B.shape[1]):\n Q.append([i+1, C.shape[0]+B.shape[1]+A.shape[0]+i+1])\n # u in ingresso al sistema che ha come uscite gli stati reali\n for i in range(B.shape[1]):\n Q.append([2*B.shape[1]+C.shape[0]+A.shape[0]+i+1, C.shape[0]+i+1])\n # xe in ingresso a sysC\n for i in range(A.shape[0]):\n Q.append([2*B.shape[1]+C.shape[0]+i+1, C.shape[0]+B.shape[1]+i+1])\n \n inputv = [i+1 for i in range(B.shape[1])]\n 
outputv = [i+1 for i in range(numpy.shape(sys_append.C)[0])]\n sys_CL = control.connect(sys_append,\n Q,\n inputv,\n outputv)\n \n t = numpy.linspace(0, 100000, 2)\n t, yout = control.step_response(sys_CL[0,0],T=t)\n dcgain = yout[-1]\n gain_w2.value = dcgain\n if dcgain != 0:\n u1 = u/gain_w2.value\n else:\n print('Il guadagno impostato per il riferimento è 0 e quindi viene cambiato a 1')\n u1 = u/1\n print('Il guadagno statico del sistema in anello chiuso (dal riferimento all\\'uscita) è: %.5f' %dcgain)\n \n X0w1 = numpy.zeros((A.shape[0],1))\n for j in range(A.shape[0]):\n X0w1 = numpy.vstack((X0w1,X0w[j]))\n X0w1 = numpy.vstack((X0w1,numpy.zeros((A.shape[0],1))))\n if simTime != 0:\n T = numpy.linspace(0, simTime, 10000)\n else:\n T = numpy.linspace(0, 1, 10000)\n \n if selu == 'impulse': #selu\n U = [0 for t in range(0,len(T))]\n U[0] = u\n U1 = [0 for t in range(0,len(T))]\n U1[0] = u1\n T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)\n T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0])\n if selu == 'step':\n U = [u for t in range(0,len(T))]\n U1 = [u1 for t in range(0,len(T))]\n T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)\n T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0])\n if selu == 'sinusoid':\n U = u*numpy.sin(2*numpy.pi/period*T)\n U1 = u1*numpy.sin(2*numpy.pi/period*T)\n T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)\n T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0])\n if selu == 'square wave':\n U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T))\n U1 = u1*numpy.sign(numpy.sin(2*numpy.pi/period*T))\n T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)\n T, yout_ref, xout_ref = control.forced_response(Gref,T,U,[0, 0])\n # N.B. 
i primi 3 stati di xout sono quelli del sistema, mentre gli ultimi 3 sono quelli dell'osservatore\n \n step_info_dict = control.step_info(sys_CL[0,0],SettlingTimeThreshold=0.05,T=T)\n print('Step info: \\n\\tTempo di salita =',step_info_dict['RiseTime'],'\\n\\tTempo di assestamento (5%) =',step_info_dict['SettlingTime'],'\\n\\tOvershoot (%)=',step_info_dict['Overshoot'])\n # print('Max x3 value (%)=', max(abs(yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]))/(numpy.pi/180*17)*100)\n \n fig = plt.figure(num='Simulation1', figsize=(14,12))\n \n fig.add_subplot(221)\n plt.title('Risposta dell\\'uscita')\n plt.ylabel('Uscita')\n plt.plot(T,yout[0],T,yout_ref,T,U,'r--')\n plt.xlabel('$t$ [s]')\n plt.axvline(x=0,color='black',linewidth=0.8)\n plt.axhline(y=0,color='black',linewidth=0.8)\n plt.legend(['$y$','Sistema del secondo ordine di riferimento','Riferimento'])\n plt.grid()\n \n fig.add_subplot(222)\n plt.title('Ingresso')\n plt.ylabel('$u$')\n plt.plot(T,yout[C.shape[0]])\n plt.xlabel('$t$ [s]')\n plt.axvline(x=0,color='black',linewidth=0.8)\n plt.axhline(y=0,color='black',linewidth=0.8)\n plt.grid()\n \n fig.add_subplot(223)\n plt.title('Risposta degli stati')\n plt.ylabel('Stati')\n plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]],\n T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1],\n T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2])\n plt.xlabel('$t$ [s]')\n plt.axvline(x=0,color='black',linewidth=0.8)\n plt.axhline(y=0,color='black',linewidth=0.8)\n plt.legend(['$x_{1}$','$x_{2}$','$x_{3}$'])\n plt.grid()\n \n fig.add_subplot(224)\n plt.title('Errori di stima')\n plt.ylabel('Errori')\n plt.plot(T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]]-yout[C.shape[0]+B.shape[1]],\n T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+1]-yout[C.shape[0]+B.shape[1]+1],\n T,yout[C.shape[0]+2*B.shape[1]+A.shape[0]+2]-yout[C.shape[0]+B.shape[1]+2])\n plt.xlabel('$t$ [s]')\n plt.axvline(x=0,color='black',linewidth=0.8)\n plt.axhline(y=0,color='black',linewidth=0.8)\n 
plt.legend(['$e_{1}$','$e_{2}$','$e_{3}$'])\n plt.grid()\n #plt.tight_layout()\n \nalltogether2 = widgets.VBox([widgets.HBox([selm, \n sele,\n selu]),\n widgets.Label(' ',border=3),\n widgets.HBox([widgets.Label('K:',border=3), Kw, \n widgets.Label(' ',border=3),\n widgets.Label(' ',border=3),\n widgets.Label('Autovalori:',border=3), \n eig1c, \n eig2c, \n eig3c,\n widgets.Label(' ',border=3),\n widgets.Label(' ',border=3),\n widgets.Label('X0 stim.:',border=3), X0w]),\n widgets.Label(' ',border=3),\n widgets.HBox([widgets.Label('L:',border=3), Lw, \n widgets.Label(' ',border=3),\n widgets.Label(' ',border=3),\n widgets.Label('Autovalori:',border=3), \n eig1o, \n eig2o, \n eig3o,\n widgets.Label(' ',border=3),\n# widgets.VBox([widgets.Label('Inverse reference gain:',border=3),\n# widgets.Label('Simulation time [s]:',border=3)]),\n widgets.VBox([gain_w2,simTime])]),\n widgets.Label(' ',border=3),\n widgets.HBox([u, \n period, \n START])])\nout2 = widgets.interactive_output(main_callback2, {'Aw':Aw, 'Bw':Bw, 'X0w':X0w, 'K':Kw, 'L':Lw,\n 'eig1c':eig1c, 'eig2c':eig2c, 'eig3c':eig3c, 'eig1o':eig1o, 'eig2o':eig2o, 'eig3o':eig3o, \n 'u':u, 'period':period, 'selm':selm, 'sele':sele, 'selu':selu, 'simTime':simTime, 'DW':DW})\nout2.layout.height = '860px'\ndisplay(out2, alltogether2)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79cb5f4bb8c5f3076a01e101ce28146d8713620 | 157,506 | ipynb | Jupyter Notebook | Task #2 Prediction using Unsupervised ML.ipynb | aniketspeaks/Task-2-Prediction-using-Unsupervised-ML | 3a7349ade1364144f193f4d3586f91ae81eb7e12 | [
"Apache-2.0"
] | null | null | null | Task #2 Prediction using Unsupervised ML.ipynb | aniketspeaks/Task-2-Prediction-using-Unsupervised-ML | 3a7349ade1364144f193f4d3586f91ae81eb7e12 | [
"Apache-2.0"
] | null | null | null | Task #2 Prediction using Unsupervised ML.ipynb | aniketspeaks/Task-2-Prediction-using-Unsupervised-ML | 3a7349ade1364144f193f4d3586f91ae81eb7e12 | [
"Apache-2.0"
] | null | null | null | 127.123487 | 42,212 | 0.844736 | [
[
[
"# Data Science and Business Analytics Intern @ The Sparks Foundation ",
"_____no_output_____"
],
[
"## Author : Aniket M. Wazarkar",
"_____no_output_____"
],
[
"### Task #2 : Prediction using Unsupervised ML ",
"_____no_output_____"
],
[
"#### Dataset : Iris.csv (https://bit.ly/3kXTdox)\n#### Algorithm used here : K-Means Clustering",
"_____no_output_____"
],
[
"### Import Libraries ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\n",
"_____no_output_____"
]
],
[
[
"### Load Dataset",
"_____no_output_____"
]
],
[
[
"df=pd.read_csv('Iris.csv')\ndf",
"_____no_output_____"
]
],
[
[
"K-Means is considered an unsupervised learning algorthm. This means you only need a features matrix. In the iris dataset, there are four features. In this notebook, the features matrix will only be two features as it is easier to visualize clusters in two dimensions. KMeans is a popular clustering algorithm that we can use to find structure in our data.\n\n",
"_____no_output_____"
]
],
[
[
"\ndf.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Id 150 non-null int64 \n 1 SepalLengthCm 150 non-null float64\n 2 SepalWidthCm 150 non-null float64\n 3 PetalLengthCm 150 non-null float64\n 4 PetalWidthCm 150 non-null float64\n 5 Species 150 non-null object \ndtypes: float64(4), int64(1), object(1)\nmemory usage: 7.2+ KB\n"
],
[
"df.Species.unique()",
"_____no_output_____"
],
[
"df[\"Species\"].value_counts()",
"_____no_output_____"
]
],
[
[
"### Arrange Data into Feature Matrix",
"_____no_output_____"
],
[
"Use DataFrame.loc attribute to access a particular cell in the given Dataframe using the index and column labels.\n\n",
"_____no_output_____"
]
],
[
[
"features = ['PetalLengthCm','PetalWidthCm']\n\n# Create features matrix\nx = df.loc[:, features].values",
"_____no_output_____"
],
[
"x",
"_____no_output_____"
]
],
[
[
"class sklearn.preprocessing.LabelEncoder.\n\nEncode target labels with value between 0 and n_classes-1.",
"_____no_output_____"
]
],
[
[
"from sklearn import preprocessing\nle=preprocessing.LabelEncoder()",
"_____no_output_____"
],
[
"df.Species=le.fit_transform(df.Species.values)\ndf.Species",
"_____no_output_____"
],
[
"y=df.Species\ny",
"_____no_output_____"
]
],
[
[
"### Standardize the data",
"_____no_output_____"
],
[
"Standardize features by removing the mean and scaling to unit variance\n\nThe standard score of a sample x is calculated as:\n\nz = (x - u) / s\n\nwhere u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False.\n\n",
"_____no_output_____"
]
],
[
[
"x=StandardScaler().fit_transform(x)",
"_____no_output_____"
]
],
[
[
"### Plot data to estimate number of clusters",
"_____no_output_____"
]
],
[
[
"X=pd.DataFrame(x,columns=features)\nplt.figure(figsize=(6,5))\nplt.scatter(X['PetalLengthCm'], X['PetalWidthCm'])\nplt.xlabel('petal length (cm)')\nplt.ylabel('petal width (cm)');\nplt.title('K-Means Clustering')\n",
"_____no_output_____"
]
],
[
[
"### Finding the optimum number of clusters for K-means clustering",
"_____no_output_____"
]
],
[
[
"# Finding the optimum number of clusters for k-means classification\nwcss = []\n\nfor i in range(1, 11):\n kmeans = KMeans(n_clusters = i, init = 'k-means++', \n max_iter = 300, n_init = 10, random_state = 0)\n kmeans.fit(x)\n wcss.append(kmeans.inertia_)",
"_____no_output_____"
],
[
"# Plotting the results onto a line graph, \n# `allowing us to observe 'The elbow'\nplt.plot(range(1, 11), wcss)\nplt.title('The elbow method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS') # Within cluster sum of squares\nplt.show()",
"_____no_output_____"
]
],
[
[
"It is called 'The elbow method'. From the above graph, the optimum number of clusters is where the elbow occurs. This is when the within-cluster sum of squares (WCSS) doesn't decrease significantly with every iteration.\n\nFrom this we choose the number of clusters as **'3'**.",
"_____no_output_____"
],
[
"### K-Means Clustering ",
"_____no_output_____"
]
],
[
[
"# Make an instance of KMeans with 3 clusters\nkmeans = KMeans(n_clusters=3, random_state=1)\n\n# Fit only on a features matrix\nkmeans.fit(x)",
"_____no_output_____"
],
[
"# Get labels and cluster centroids\nlabels = kmeans.labels_\ncentroids = kmeans.cluster_centers_",
"_____no_output_____"
],
[
"labels",
"_____no_output_____"
],
[
"centroids",
"_____no_output_____"
]
],
[
[
"### Visually Evaluate the clusters",
"_____no_output_____"
]
],
[
[
"colormap = np.array(['r', 'g', 'b'])\nplt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[labels])\nplt.scatter(centroids[:,0], centroids[:,1], s = 300, marker = 'x', c = 'k')\n\nplt.xlabel('petal length (cm)')\nplt.ylabel('petal width (cm)');",
"_____no_output_____"
]
],
[
[
"### Visually Evaluate the clusters and compare the species",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,4))\n\nplt.subplot(1, 2, 1)\nplt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[labels])\nplt.scatter(centroids[:,0], centroids[:,1], s = 300, marker = 'x', c = 'k')\nplt.xlabel('petal length (cm)')\nplt.ylabel('petal width (cm)');\nplt.title('K-Means Clustering (k = 3)')\n \nplt.subplot(1, 2, 2)\nplt.scatter(X['PetalLengthCm'], X['PetalWidthCm'], c=colormap[y], s=40)\nplt.xlabel('petal length (cm)')\nplt.ylabel('petal width (cm)');\nplt.title('Flower Species')\n\nplt.tight_layout()",
"_____no_output_____"
]
],
[
[
"\nThey look pretty similar. Looks like KMeans picked up flower differences with only two features and not the labels. The colors are different in the two graphs simply because KMeans assigns an arbitrary cluster number and the iris dataset has an arbitrary number in the target column.",
"_____no_output_____"
],
[
"### PCA Projection in 2D ",
"_____no_output_____"
],
[
"The original data has 4 columns (sepal length, sepal width, petal length, and petal width). The code below projects the original data which is 4 dimensional into 2 dimensions. Note that after dimensionality reduction, there usually isn’t a particular meaning assigned to each principal component. The new components are just the two main dimensions of variation",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=2)\n\n# Fit and transform the data\nprincipalComponents = pca.fit_transform(x)\n\nprincipalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])",
"_____no_output_____"
],
[
"df=pd.read_csv('Iris.csv')",
"_____no_output_____"
]
],
[
[
"### 2D Projection",
"_____no_output_____"
]
],
[
[
"finalDf = pd.concat([principalDf, df[['Species']]], axis = 1)",
"_____no_output_____"
],
[
"finalDf",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (8,8));\ntargets = df.loc[:, 'Species'].unique()\ncolors = ['r', 'g', 'b']\n\nfor target, color in zip(targets,colors):\n indicesToKeep = finalDf['Species'] == target\n ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']\n , finalDf.loc[indicesToKeep, 'principal component 2']\n , c = color\n , s = 50)\n\nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 Component PCA', fontsize = 20) \nax.legend(targets)\nax.grid()",
"_____no_output_____"
]
],
[
[
"\nFrom the graph, it looks like the setosa class is well separated from the versicolor and virginica classes.",
"_____no_output_____"
],
[
"### Explained Variance",
"_____no_output_____"
],
[
"The explained variance tells us how much information (variance) can be attributed to each of the principal components. This is important as while you can convert 4 dimensional space to 2 dimensional space, you lose some of the variance (information) when you do this.",
"_____no_output_____"
]
],
[
[
"pca.explained_variance_ratio_",
"_____no_output_____"
],
[
"sum(pca.explained_variance_ratio_)",
"_____no_output_____"
]
],
[
[
"Together, the two principal components contain nearly 100% of the information. The first principal component contains about 98% of the variance. The second principal component contains about 1.8% of the variance.\n\nPCA can be used to help visualize our data.",
"_____no_output_____"
],
[
"**Thank you!**",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e79cbb61051894612b3d50bf9ba9b25c0689f2a8 | 2,207 | ipynb | Jupyter Notebook | notebooks/template.ipynb | devyueightfive/recommendations_project | c0badc47991cc4117c3d790baa40b6d4eb570f7e | [
"MIT"
] | null | null | null | notebooks/template.ipynb | devyueightfive/recommendations_project | c0badc47991cc4117c3d790baa40b6d4eb570f7e | [
"MIT"
] | null | null | null | notebooks/template.ipynb | devyueightfive/recommendations_project | c0badc47991cc4117c3d790baa40b6d4eb570f7e | [
"MIT"
] | null | null | null | 22.989583 | 252 | 0.565927 | [
[
[
"import os\nimport sys\nimport math\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nimport scipy as sp\nimport sklearn\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\n\n%load_ext autoreload\n%autoreload 2\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport seaborn as sns\nsns.set_context(\"poster\")\nsns.set(rc={'figure.figsize': (16, 9.)})\nsns.set_style(\"whitegrid\")\n\nimport pandas as pd\npd.set_option(\"display.max_rows\", 120)\npd.set_option(\"display.max_columns\", 120)\n\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)",
"_____no_output_____"
],
[
"from recommendations_project import *",
"_____no_output_____"
]
],
[
[
"**PLEASE** save this file right now using the following naming convention: `NUMBER_FOR_SORTING-YOUR_INITIALS-SHORT_DESCRIPTION`, e.g. `1.0-fw-initial-data-exploration`. Use the number to order the file within the directory according to its usage.",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown"
]
] |
e79cc11e855fe723318339a4c9e770710d781365 | 37,131 | ipynb | Jupyter Notebook | samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | samples/AppInsights/TroubleShootingGuides/Performance-overview-TSG.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | 66.662478 | 7,183 | 0.560098 | [
[
[
"# Dynamics 365 Business Central Trouble Shooting Guide (TSG) - Performance analysis (overview)\r\n\r\nThis notebook contains Kusto queries that can help getting to the root cause of a performance issue for an environment. Each section in the notebook contains links to the performance tuning guide on docs [aka.ms/bcperformance](aka.ms/bcperformance), links to the documentation of relevant telemetry in [aka.ms/bctelemetry](aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area (sessions, web service requests, database calls, reports, and page load times).\r\n\r\nNB! Some of the signal used in this notebook is only available in newer versions of Business Central, so check the version of your environment if some sections do not return any data. The signal documentation states in which version a given signal was introduced.",
"_____no_output_____"
],
[
"## 1. Connect to Application Insights\r\nFirst you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it?) and connect to your Application Insights resource (get appid and appkey from the API access page in the Application Insights portal)",
"_____no_output_____"
]
],
[
[
"# load the KQLmagic module\r\n%reload_ext Kqlmagic\r\n\r\n# Connect to the Application Insights API\r\n%kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'",
"_____no_output_____"
]
],
[
[
"## 2. Define filters\r\nThis workbook is designed for troubleshooting a single environment. Please provide values for aadTenantId and environmentName: ",
"_____no_output_____"
]
],
[
[
"aadTenantId = \"<Add AAD tenant id here>\"\r\nenvironmentName = \"<add environment name here>\"",
"_____no_output_____"
]
],
[
[
"# Analyze performance\r\nNow you can run Kusto queries to look for possible root causes for performance issues.\r\n\r\nEither click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries",
"_____no_output_____"
],
[
"## Sessions\r\n\r\nPerformance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-online#telemetry\r\n\r\nSession telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-authorization-trace#authorization-succeeded-open-company\r\n\r\nKQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Authorization.kql",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0004'\r\n and timestamp > ago(7d)\r\n| extend clientType = tostring( customDimensions.clientType )\r\n| summarize request_count=count() by clientType, bin(timestamp, 1d)\r\n| render timechart title= 'Number of sessions by client type'",
"_____no_output_____"
],
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0004'\r\n and timestamp > ago(7d)\r\n| extend clientType = tostring( customDimensions.clientType )\r\n , executionTimeInSec = toreal(totimespan(customDimensions.serverExecutionTime))/10000000\r\n| summarize _count=count() by executionTimeInSeconds = bin(executionTimeInSec, 1), clientType\r\n| extend log_count = log10( _count )\r\n| order by clientType, executionTimeInSeconds asc\r\n| render columnchart with (ycolumns = log_count, series = clientType, title= 'Execution time (in seconds) of session login time by client type', ytitle = 'log(count)') ",
"_____no_output_____"
]
],
[
[
"## Web service requests\r\n\r\nPerformance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services\r\n\r\nWeb service telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace\r\n\r\nKQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n| extend category = tostring( customDimensions.category )\r\n| summarize request_count=count() by category, bin(timestamp, 1d)\r\n| render timechart title= 'Number of web service requests by category'",
"_____no_output_____"
],
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n| extend category = tostring( customDimensions.category )\r\n , executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 //the datatype for executionTime is timespan \r\n| summarize count=count() by executionTime_ms = bin(executionTimeInMS, 100), category\r\n| order by category, executionTime_ms asc\r\n| render columnchart with (ycolumns = count, series = category, title= 'Execution time (in milliseconds) of web service requests by category' ) \r\n",
"_____no_output_____"
]
],
[
[
"## Data related\r\n\r\nPerformance tuning guide: \r\n* [Efficient data access](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#efficient-data-access)\r\n* [Avoid locking](https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-application#avoid-locking)\r\n\r\nDatabase telemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-long-running-sql-query-trace\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-database-locks-trace\r\n\r\nKQL samples:\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Long%20Running%20SQL%20Queries.kql\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/LockTimeouts.kql\r\n",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0005'\r\n and timestamp > ago(7d)\r\n| summarize count() by bin(timestamp, 1d)\r\n| render timechart title= 'Number of long running SQL queries'",
"_____no_output_____"
],
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0012'\r\n and timestamp > ago(7d)\r\n| summarize request_count=count() by bin(timestamp, 1d)\r\n| render timechart title= 'Number of database lock timeouts'",
"_____no_output_____"
]
],
[
[
"## Company management\r\n\r\nOperations such as \"copy company\" can cause performance degradations if they are done when users are logged into the system.\r\n\r\nRead more in the performance tuning guide here: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-application#be-cautious-with-the-renamecopy-company-operations\r\n\r\nTelemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-company-lifecycle-trace\r\n\r\nKQL samples: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/CompanyLifecycle.kql\r\n\r\n",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId in ('LC0001')\r\n and timestamp > ago(7d)\r\n| extend operation_type = case(\r\n customDimensions.eventId == 'LC0001', 'Company created',\r\n customDimensions.eventId == 'LC0004', 'Company copied',\r\n customDimensions.eventId == 'LC0007', 'Company deleted', \r\n 'Other'\r\n)\r\n| summarize count() by operation_type, bin(timestamp, 1d)\r\n| render timechart title= 'Company management operations'",
"_____no_output_____"
]
],
[
[
"## Reports\r\n\r\nLearn more about how to write performant reports here in the performance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-reports\r\n\r\nReport telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-reports-trace\r\n\r\nKQL samples:\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/Reports.kql\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/PerformanceTuning/ReportExecution.kql",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0006'\r\n and timestamp > ago(7d)\r\n| extend clientType = tostring( customDimensions.clientType )\r\n , reportName = tostring( customDimensions.alObjectName )\r\n| where reportName <> ''\r\n| summarize count=count() by clientType, bin(timestamp, 1d)\r\n| render timechart title= 'Number of reports executed (shown by client/session type)'",
"_____no_output_____"
],
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0006'\r\n and timestamp > ago(7d)\r\n| extend reportName = tostring( customDimensions.alObjectName )\r\n , executionTimeInSec = toreal(totimespan(customDimensions.totalTime))/10000000 //the datatype for totalTime is timespan \r\n| where reportName <> ''\r\n| summarize avg=avg(executionTimeInSec), median=percentile(executionTimeInSec, 50), percentile95=percentile(executionTimeInSec, 95), max=max(executionTimeInSec) \r\n by reportName\r\n| order by percentile95\r\n| limit 10\r\n| render columnchart with (title= 'Execution time stats of reports by report name (top 10 by 95% percentile)', ytitle='Time (in seconds)' ) ",
"_____no_output_____"
]
],
[
[
"## Page views\r\n\r\nPage view telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-page-view-trace\r\n\r\nKQL samples\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/PageViews.kql\r\n* https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/BrowserUsage.kql",
"_____no_output_____"
]
],
[
[
"%%kql\r\n// Top 10 longest page times \r\n// \r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\npageViews\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n// and customDimensions.environmentName == _environmentName\r\n| where timestamp > ago(7d)\r\n| extend objectId = tostring(customDimensions.alObjectId)\r\n| summarize median_load_time_in_MS = percentile(duration,50) by pageName=name, objectId\r\n| order by median_load_time_in_MS desc\r\n| limit 10",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79cc78b170a8b3b610cfceb0539dcadf379838f | 153,675 | ipynb | Jupyter Notebook | examples/Notebooks/flopy3_mf6_B_complex-model.ipynb | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | 1 | 2019-11-01T00:34:14.000Z | 2019-11-01T00:34:14.000Z | examples/Notebooks/flopy3_mf6_B_complex-model.ipynb | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | examples/Notebooks/flopy3_mf6_B_complex-model.ipynb | gyanz/flopy | 282703716a01721e07905da65aa54e6017452a5a | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | 111.763636 | 53,688 | 0.71797 | [
[
[
"# FloPy\n\n## Creating a Complex MODFLOW 6 Model with Flopy\n\nThe purpose of this notebook is to demonstrate the Flopy capabilities for building a more complex MODFLOW 6 model from scratch. This notebook will demonstrate the capabilities by replicating the advgw_tidal model that is distributed with MODFLOW 6.",
"_____no_output_____"
],
[
"### Setup the Notebook Environment",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport sys\nimport os\nimport platform\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n# run installed version of flopy or add local path\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join('..', '..'))\n sys.path.append(fpth)\n import flopy\n\nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\nprint('matplotlib version: {}'.format(mpl.__version__))\nprint('flopy version: {}'.format(flopy.__version__))",
"3.6.5 | packaged by conda-forge | (default, Apr 6 2018, 13:44:09) \n[GCC 4.2.1 Compatible Apple LLVM 6.1.0 (clang-602.0.53)]\nnumpy version: 1.14.5\nmatplotlib version: 2.2.2\nflopy version: 3.2.10\n"
],
[
"# For this example, we will set up a model workspace.\n# Model input files and output files will reside here.\nmodel_name = 'advgw_tidal'\nworkspace = os.path.join('data', model_name)\nif not os.path.exists(workspace):\n os.makedirs(workspace)",
"_____no_output_____"
],
[
"data_pth = os.path.join('..', 'data', 'mf6', 'create_tests', \n 'test005_advgw_tidal')\nassert os.path.isdir(data_pth)",
"_____no_output_____"
],
[
"# create simulation\nsim = flopy.mf6.MFSimulation(sim_name=model_name, version='mf6', exe_name='mf6', \n sim_ws=workspace)\n\n# create tdis package\ntdis_rc = [(1.0, 1, 1.0), (10.0, 120, 1.0), \n (10.0, 120, 1.0), (10.0, 120, 1.0)]\ntdis = flopy.mf6.ModflowTdis(sim, pname='tdis', time_units='DAYS', \n nper=4, perioddata=tdis_rc)\n\n# create gwf model\ngwf = flopy.mf6.ModflowGwf(sim, modelname=model_name,\n model_nam_file='{}.nam'.format(model_name))\ngwf.name_file.save_flows = True\n\n# create iterative model solution and register the gwf model with it\nims = flopy.mf6.ModflowIms(sim, pname='ims', print_option='SUMMARY', \n complexity='SIMPLE', outer_hclose=0.0001, \n outer_maximum=500, under_relaxation='NONE', \n inner_maximum=100, inner_hclose=0.0001, \n rcloserecord=0.001, linear_acceleration='CG', \n scaling_method='NONE', reordering_method='NONE', \n relaxation_factor=0.97)\nsim.register_ims_package(ims, [gwf.name])",
"Directory structure already exists for simulation path /Users/jdhughes/Documents/Development/flopy_us/examples/Notebooks/data/advgw_tidal\n"
],
[
"# discretization package\nnlay = 3\nnrow = 15\nncol = 10\nbotlay2 = {'factor':1.0, 'data': [-100 for x in range(150)]}\ndis = flopy.mf6.ModflowGwfdis(gwf, pname='dis', nlay=nlay, nrow=nrow, ncol=ncol, \n delr=500.0, delc=500.0, top=50.0, \n botm=[5.0, -10.0, botlay2], \n fname='{}.dis'.format(model_name))\n\n# initial conditions\nic = flopy.mf6.ModflowGwfic(gwf, pname='ic', strt=50.0,\n fname='{}.ic'.format(model_name))\n\n# node property flow\nnpf = flopy.mf6.ModflowGwfnpf(gwf, pname='npf', save_flows=True, \n icelltype=[1,0,0], \n k=[5.0, 0.1, 4.0],\n k33=[0.5, 0.005, 0.1])\n\n# output control\noc = flopy.mf6.ModflowGwfoc(gwf, pname='oc', budget_filerecord='{}.cbb'.format(model_name),\n head_filerecord='{}.hds'.format(model_name),\n headprintrecord=[('COLUMNS', 10, 'WIDTH', 15,\n 'DIGITS', 6, 'GENERAL')],\n saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')],\n printrecord=[('HEAD', 'FIRST'), ('HEAD', 'LAST'), \n ('BUDGET', 'LAST')])",
"_____no_output_____"
],
[
"# storage package\nsy = flopy.mf6.ModflowGwfsto.sy.empty(gwf, layered=True)\nfor layer in range(0,3):\n sy[layer]['data'] = 0.2\n \nss = flopy.mf6.ModflowGwfsto.ss.empty(gwf, layered=True, default_value=0.000001)\n\nsto = flopy.mf6.ModflowGwfsto(gwf, pname='sto', save_flows=True, iconvert=1, \n ss=ss, sy=sy, steady_state={0:True},\n transient={1:True})",
"_____no_output_____"
],
[
"# well package\n# test empty with aux vars, bound names, and time series\nperiod_two = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=3, aux_vars=['var1', 'var2', 'var3'],\n boundnames=True, timeseries=True)\nperiod_two[0][0] = ((0,11,2), -50.0, -1, -2, -3, None)\nperiod_two[0][1] = ((2,4,7), 'well_1_rate', 1, 2, 3, 'well_1')\nperiod_two[0][2] = ((2,3,2), 'well_2_rate', 4, 5, 6, 'well_2')\nperiod_three = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=2, aux_vars=['var1', 'var2', 'var3'],\n boundnames=True, timeseries=True)\nperiod_three[0][0] = ((2,3,2), 'well_2_rate', 1, 2, 3, 'well_2')\nperiod_three[0][1] = ((2,4,7), 'well_1_rate', 4, 5, 6, 'well_1')\nperiod_four = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=5, aux_vars=['var1', 'var2', 'var3'],\n boundnames=True, timeseries=True)\nperiod_four[0][0] = ((2,4,7), 'well_1_rate', 1, 2, 3, 'well_1')\nperiod_four[0][1] = ((2,3,2), 'well_2_rate', 4, 5, 6, 'well_2')\nperiod_four[0][2] = ((0,11,2), -10.0, 7, 8, 9, None)\nperiod_four[0][3] = ((0,2,4), -20.0, 17, 18, 19, None)\nperiod_four[0][4] = ((0,13,5), -40.0, 27, 28, 29, None)\nstress_period_data = {}\nstress_period_data[1] = period_two[0]\nstress_period_data[2] = period_three[0]\nstress_period_data[3] = period_four[0]\nwel = flopy.mf6.ModflowGwfwel(gwf, pname='wel', print_input=True, print_flows=True,\n auxiliary=[('var1', 'var2', 'var3')], maxbound=5,\n stress_period_data=stress_period_data, boundnames=True, \n save_flows=True,\n ts_filerecord='well-rates.ts')\n\n# well ts package\nts_recarray =[(0.0, 0.0, 0.0, 0.0),\n (1.0, -200.0, 0.0, -100.0),\n (11.0, -1800.0, -500.0, -200.0),\n (21.0, -200.0, -400.0, -300.0),\n (31.0, 0.0, -600.0, -400.0)]\nwell_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='well_ts', fname='well-rates.ts', parent_file=wel,\n timeseries=ts_recarray,\n time_series_namerecord=[('well_1_rate', 'well_2_rate', 'well_3_rate')],\n interpolation_methodrecord=[('stepwise', 'stepwise', 'stepwise')])",
"_____no_output_____"
],
[
"# Evapotranspiration\nevt_period = flopy.mf6.ModflowGwfevt.stress_period_data.empty(gwf, 150, nseg=3)\nfor col in range(0, 10):\n for row in range(0, 15):\n evt_period[0][col*15+row] = (((0, row, col), 50.0, 0.0004, 10.0, 0.2, 0.5, 0.3, 0.1, None))\nevt = flopy.mf6.ModflowGwfevt(gwf, pname='evt', print_input=True, print_flows=True, \n save_flows=True, maxbound=150,\n nseg=3, stress_period_data=evt_period)",
"_____no_output_____"
],
[
"# General-Head Boundaries\nghb_period = {}\nghb_period_array = []\nfor layer, cond in zip(range(1, 3), [15.0, 1500.0]):\n for row in range(0, 15):\n ghb_period_array.append(((layer, row, 9), 'tides', cond, 'Estuary-L2'))\nghb_period[0] = ghb_period_array\nghb = flopy.mf6.ModflowGwfghb(gwf, pname='ghb', print_input=True, print_flows=True, \n save_flows=True, boundnames=True,\n ts_filerecord='tides.ts', \n obs_filerecord='{}.ghb.obs'.format(model_name),\n maxbound=30, stress_period_data=ghb_period)\nts_recarray=[]\nfd = open(os.path.join(data_pth, 'tides.txt'), 'r')\nfor line in fd:\n line_list = line.strip().split(',')\n ts_recarray.append((float(line_list[0]), float(line_list[1])))\nghb_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='tides_ts', fname='tides.ts', \n parent_file=ghb, timeseries=ts_recarray,\n time_series_namerecord='tides',\n interpolation_methodrecord='linear')\nobs_recarray = {'ghb_obs.csv':[('ghb-2-6-10', 'GHB', (1, 5, 9)), \n ('ghb-3-6-10', 'GHB', (2, 5, 9))],\n 'ghb_flows.csv':[('Estuary2', 'GHB', 'Estuary-L2'), \n ('Estuary3', 'GHB', 'Estuary-L3')]}\nghb_obs_package = flopy.mf6.ModflowUtlobs(gwf, pname='ghb_obs', fname='{}.ghb.obs'.format(model_name), \n parent_file=ghb, digits=10, print_input=True, \n continuous=obs_recarray)",
"_____no_output_____"
],
[
"obs_recarray = {'head_obs.csv':[('h1_13_8', 'HEAD', (2, 12, 7))],\n 'intercell_flow_obs1.csv':[('ICF1_1.0', 'FLOW-JA-FACE', (0, 4, 5), (0, 5, 5))],\n 'head-hydrographs.csv':[('h3-13-9', 'HEAD', (2, 12, 8)),\n ('h3-12-8', 'HEAD', (2, 11, 7)),\n ('h1-4-3', 'HEAD', (0, 3, 2)),\n ('h1-12-3', 'HEAD', (0, 11, 2)),\n ('h1-13-9', 'HEAD', (0, 12, 8))]}\nobs_package = flopy.mf6.ModflowUtlobs(gwf, pname='head_obs', fname='{}.obs'.format(model_name), \n digits=10, print_input=True,\n continuous=obs_recarray)",
"_____no_output_____"
],
[
"# River\nriv_period = {}\nriv_period_array = [((0,2,0),'river_stage_1',1001.0,35.9,None),\n ((0,3,1),'river_stage_1',1002.0,35.8,None),\n ((0,4,2),'river_stage_1',1003.0,35.7,None),\n ((0,4,3),'river_stage_1',1004.0,35.6,None),\n ((0,5,4),'river_stage_1',1005.0,35.5,None),\n ((0,5,5),'river_stage_1',1006.0,35.4,'riv1_c6'),\n ((0,5,6),'river_stage_1',1007.0,35.3,'riv1_c7'),\n ((0,4,7),'river_stage_1',1008.0,35.2,None),\n ((0,4,8),'river_stage_1',1009.0,35.1,None),\n ((0,4,9),'river_stage_1',1010.0,35.0,None),\n ((0,9,0),'river_stage_2',1001.0,36.9,'riv2_upper'),\n ((0,8,1),'river_stage_2',1002.0,36.8,'riv2_upper'),\n ((0,7,2),'river_stage_2',1003.0,36.7,'riv2_upper'),\n ((0,6,3),'river_stage_2',1004.0,36.6,None),\n ((0,6,4),'river_stage_2',1005.0,36.5,None),\n ((0,5,5),'river_stage_2',1006.0,36.4,'riv2_c6'),\n ((0,5,6),'river_stage_2',1007.0,36.3,'riv2_c7'),\n ((0,6,7),'river_stage_2',1008.0,36.2,None),\n ((0,6,8),'river_stage_2',1009.0,36.1),\n ((0,6,9),'river_stage_2',1010.0,36.0)]\nriv_period[0] = riv_period_array\nriv = flopy.mf6.ModflowGwfriv(gwf, pname='riv', print_input=True, print_flows=True, \n save_flows='{}.cbc'.format(model_name),\n boundnames=True, ts_filerecord='river_stages.ts',\n maxbound=20, stress_period_data=riv_period, \n obs_filerecord='{}.riv.obs'.format(model_name))\nts_recarray=[(0.0,40.0,41.0),(1.0,41.0,41.5),\n (2.0,43.0,42.0),(3.0,45.0,42.8),\n (4.0,44.0,43.0),(6.0,43.0,43.1),\n (9.0,42.0,42.4),(11.0,41.0,41.5),\n (31.0,40.0,41.0)]\nriv_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='riv_ts', fname='river_stages.ts', \n parent_file=riv,\n timeseries=ts_recarray,\n time_series_namerecord=[('river_stage_1', \n 'river_stage_2')],\n interpolation_methodrecord=[('linear', 'stepwise')])\nobs_recarray = {'riv_obs.csv':[('rv1-3-1', 'RIV', (0,2,0)), ('rv1-4-2', 'RIV', (0,3,1)),\n ('rv1-5-3', 'RIV', (0,4,2)), ('rv1-5-4', 'RIV', (0,4,3)),\n ('rv1-6-5', 'RIV', (0,5,4)), ('rv1-c6', 'RIV', 'riv1_c6'),\n ('rv1-c7', 'RIV', 'riv1_c7'), ('rv2-upper', 
'RIV', 'riv2_upper'),\n ('rv-2-7-4', 'RIV', (0,6,3)), ('rv2-8-5', 'RIV', (0,6,4)),\n ('rv-2-9-6', 'RIV', (0,5,5,))],\n 'riv_flowsA.csv':[('riv1-3-1', 'RIV', (0,2,0)), ('riv1-4-2', 'RIV', (0,3,1)),\n ('riv1-5-3', 'RIV', (0,4,2))],\n 'riv_flowsB.csv':[('riv2-10-1', 'RIV', (0,9,0)), ('riv-2-9-2', 'RIV', (0,8,1)),\n ('riv2-8-3', 'RIV', (0,7,2))]}\nriv_obs_package = flopy.mf6.ModflowUtlobs(gwf, pname='riv_obs', \n fname='{}.riv.obs'.format(model_name), \n parent_file=riv, digits=10,\n print_input=True, continuous=obs_recarray)",
"_____no_output_____"
],
[
"# First recharge package\nrch1_period = {}\nrch1_period_array = []\ncol_range = {0:3,1:4,2:5}\nfor row in range(0, 15):\n if row in col_range:\n col_max = col_range[row]\n else:\n col_max = 6\n for col in range(0, col_max):\n if (row == 3 and col == 5) or (row == 2 and col == 4) or (row == 1 and col == 3) or (row == 0 and col == 2):\n mult = 0.5\n else:\n mult = 1.0\n if row == 0 and col == 0:\n bnd = 'rch-1-1'\n elif row == 0 and col == 1:\n bnd = 'rch-1-2'\n elif row == 1 and col == 2:\n bnd = 'rch-2-3'\n else:\n bnd = None\n rch1_period_array.append(((0, row, col), 'rch_1', mult, bnd))\nrch1_period[0] = rch1_period_array\nrch1 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_1.rch'.format(model_name), \n pname='rch_1', fixed_cell=True,\n auxiliary='MULTIPLIER', auxmultname='MULTIPLIER',\n print_input=True, print_flows=True, \n save_flows=True, boundnames=True,\n ts_filerecord='recharge_rates_1.ts', \n maxbound=84, stress_period_data=rch1_period)\nts_recarray=[(0.0, 0.0015), (1.0, 0.0010),\n (11.0, 0.0015),(21.0, 0.0025),\n (31.0, 0.0015)]\nrch1_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_1_ts', \n fname='recharge_rates_1.ts', \n parent_file=rch1,\n timeseries=ts_recarray,\n time_series_namerecord='rch_1',\n interpolation_methodrecord='stepwise')",
"_____no_output_____"
],
[
"# Second recharge package\nrch2_period = {}\nrch2_period_array = [((0,0,2), 'rch_2', 0.5),\n ((0,0,3), 'rch_2', 1.0),\n ((0,0,4), 'rch_2', 1.0),\n ((0,0,5), 'rch_2', 1.0),\n ((0,0,6), 'rch_2', 1.0),\n ((0,0,7), 'rch_2', 1.0),\n ((0,0,8), 'rch_2', 1.0),\n ((0,0,9), 'rch_2', 0.5),\n ((0,1,3), 'rch_2', 0.5),\n ((0,1,4), 'rch_2', 1.0),\n ((0,1,5), 'rch_2', 1.0),\n ((0,1,6), 'rch_2', 1.0),\n ((0,1,7), 'rch_2', 1.0),\n ((0,1,8), 'rch_2', 0.5),\n ((0,2,4), 'rch_2', 0.5),\n ((0,2,5), 'rch_2', 1.0),\n ((0,2,6), 'rch_2', 1.0),\n ((0,2,7), 'rch_2', 0.5),\n ((0,3,5), 'rch_2', 0.5),\n ((0,3,6), 'rch_2', 0.5)]\nrch2_period[0] = rch2_period_array\nrch2 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_2.rch'.format(model_name), \n pname='rch_2', fixed_cell=True,\n auxiliary='MULTIPLIER', auxmultname='MULTIPLIER',\n print_input=True, print_flows=True, save_flows=True,\n ts_filerecord='recharge_rates_2.ts', maxbound=20, \n stress_period_data=rch2_period)\nts_recarray=[(0.0, 0.0016), (1.0, 0.0018),\n (11.0, 0.0019),(21.0, 0.0016),\n (31.0, 0.0018)]\nrch2_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_2_ts', \n fname='recharge_rates_2.ts', \n parent_file=rch2,\n timeseries=ts_recarray,\n time_series_namerecord='rch_2',\n interpolation_methodrecord='linear')",
"_____no_output_____"
],
[
"# Third recharge package\nrch3_period = {}\nrch3_period_array = []\ncol_range = {0:9,1:8,2:7}\nfor row in range(0, 15):\n if row in col_range:\n col_min = col_range[row]\n else:\n col_min = 6\n for col in range(col_min, 10):\n if (row == 0 and col == 9) or (row == 1 and col == 8) or (row == 2 and col == 7) or (row == 3 and col == 6):\n mult = 0.5\n else:\n mult = 1.0\n rch3_period_array.append(((0, row, col), 'rch_3', mult))\nrch3_period[0] = rch3_period_array\nrch3 = flopy.mf6.ModflowGwfrch(gwf, fname='{}_3.rch'.format(model_name), \n pname='rch_3', fixed_cell=True,\n auxiliary='MULTIPLIER', auxmultname='MULTIPLIER',\n print_input=True, print_flows=True, save_flows=True,\n ts_filerecord='recharge_rates_3.ts', maxbound=54, \n stress_period_data=rch3_period)\nts_recarray=[(0.0, 0.0017),(1.0, 0.0020),(11.0, 0.0017),(21.0, 0.0018),(31.0, 0.0020)]\nrch3_ts_package = flopy.mf6.ModflowUtlts(gwf, pname='rch_3_ts', \n fname='recharge_rates_3.ts', \n parent_file=rch3,\n timeseries=ts_recarray,\n time_series_namerecord='rch_3',\n interpolation_methodrecord='linear')",
"_____no_output_____"
]
],
[
[
"### Create the MODFLOW 6 Input Files and Run the Model\n\nOnce all the flopy objects are created, it is very easy to create all of the input files and run the model.",
"_____no_output_____"
]
],
[
[
"# change folder to save simulation\n#sim.simulation_data.mfpath.set_sim_path(run_folder)",
"_____no_output_____"
],
[
"# write simulation to new location\nsim.write_simulation()",
"writing simulation...\n writing simulation name file...\n writing simulation tdis package...\n writing ims package ims...\n writing model advgw_tidal...\n writing model name file...\n writing package dis...\n writing package ic...\n writing package npf...\n writing package oc...\n writing package sto...\n writing package wel...\n writing package well_ts...\n writing package evt...\n writing package ghb...\n writing package tides_ts...\n writing package ghb_obs...\n writing package head_obs...\n writing package riv...\n writing package riv_ts...\n writing package riv_obs...\n writing package rch_1...\n writing package rch_1_ts...\n writing package rch_2...\n writing package rch_2_ts...\n writing package rch_3...\n writing package rch_3_ts...\n"
],
[
"# Print a list of the files that were created\n# in workspace\nprint(os.listdir(workspace))",
"['intercell_flow_obs1.csv', 'riv_flowsA.csv', 'advgw_tidal.nam', 'advgw_tidal.ic', 'riv_flowsB.csv', 'advgw_tidal.dis.grb', 'recharge_rates_1.ts', 'advgw_tidal.sto', 'advgw_tidal.lst', 'tides.ts', 'advgw_tidal.cbb', 'advgw_tidal.ims', 'well-rates.ts', 'advgw_tidal.ghb', 'advgw_tidal.obs', 'advgw_tidal.riv.obs', 'ghb_flows.csv', 'advgw_tidal.dis', 'advgw_tidal_1.rch', 'advgw_tidal_3.rch', 'advgw_tidal_2.rch', 'advgw_tidal.oc', 'river_stages.ts', 'advgw_tidal.hds', 'advgw_tidal.wel', 'advgw_tidal.ghb.obs', 'advgw_tidal.npf', 'head_obs.csv', 'advgw_tidal.tdis', 'ghb_obs.csv', 'recharge_rates_2.ts', 'mfsim.nam', 'advgw_tidal.riv', 'head-hydrographs.csv', 'mfsim.lst', 'recharge_rates_3.ts', 'advgw_tidal.evt', 'riv_obs.csv']\n"
]
],
[
[
"### Run the Simulation\n\nWe can also run the simulation from the notebook, but only if the MODFLOW 6 executable is available. The executable can be made available by putting the executable in a folder that is listed in the system path variable. Another option is to just put a copy of the executable in the simulation folder, though this should generally be avoided. A final option is to provide a full path to the executable when the simulation is constructed. This would be done by specifying exe_name with the full path.",
"_____no_output_____"
]
],
[
[
"# Run the simulation\nsuccess, buff = sim.run_simulation()\nprint('\\nSuccess is: ', success)",
"FloPy is using the following executable to run the model: /Users/jdhughes/.local/bin/mf6\n MODFLOW 6\n U.S. GEOLOGICAL SURVEY MODULAR HYDROLOGIC MODEL\n VERSION 6.0.3 08/09/2018\n\n MODFLOW 6 compiled Sep 24 2018 16:09:01 with GFORTRAN compiler (ver. 6.4.0)\n\nThis software has been approved for release by the U.S. Geological \nSurvey (USGS). Although the software has been subjected to rigorous \nreview, the USGS reserves the right to update the software as needed \npursuant to further analysis and review. No warranty, expressed or \nimplied, is made by the USGS or the U.S. Government as to the \nfunctionality of the software and related material nor shall the \nfact of release constitute any such warranty. Furthermore, the \nsoftware is released on condition that neither the USGS nor the U.S. \nGovernment shall be held liable for any damages resulting from its \nauthorized or unauthorized use. Also refer to the USGS Water \nResources Software User Rights Notice for complete use, copyright, \nand distribution information.\n\n Run start date and time (yyyy/mm/dd hh:mm:ss): 2018/10/19 16:29:49\n\n Writing simulation list file: mfsim.lst\n Using Simulation name file: mfsim.nam\n Solving: Stress period: 1 Time step: 1\n Solving: Stress period: 2 Time step: 1\n Solving: Stress period: 2 Time step: 2\n Solving: Stress period: 2 Time step: 3\n Solving: Stress period: 2 Time step: 4\n Solving: Stress period: 2 Time step: 5\n Solving: Stress period: 2 Time step: 6\n Solving: Stress period: 2 Time step: 7\n Solving: Stress period: 2 Time step: 8\n Solving: Stress period: 2 Time step: 9\n Solving: Stress period: 2 Time step: 10\n Solving: Stress period: 2 Time step: 11\n Solving: Stress period: 2 Time step: 12\n Solving: Stress period: 2 Time step: 13\n Solving: Stress period: 2 Time step: 14\n Solving: Stress period: 2 Time step: 15\n Solving: Stress period: 2 Time step: 16\n Solving: Stress period: 2 Time step: 17\n Solving: Stress period: 2 Time step: 18\n Solving: Stress 
period: 2 Time step: 19\n Solving: Stress period: 2 Time step: 20\n Solving: Stress period: 2 Time step: 21\n Solving: Stress period: 2 Time step: 22\n Solving: Stress period: 2 Time step: 23\n Solving: Stress period: 2 Time step: 24\n Solving: Stress period: 2 Time step: 25\n Solving: Stress period: 2 Time step: 26\n Solving: Stress period: 2 Time step: 27\n Solving: Stress period: 2 Time step: 28\n Solving: Stress period: 2 Time step: 29\n Solving: Stress period: 2 Time step: 30\n Solving: Stress period: 2 Time step: 31\n Solving: Stress period: 2 Time step: 32\n Solving: Stress period: 2 Time step: 33\n Solving: Stress period: 2 Time step: 34\n Solving: Stress period: 2 Time step: 35\n Solving: Stress period: 2 Time step: 36\n Solving: Stress period: 2 Time step: 37\n Solving: Stress period: 2 Time step: 38\n Solving: Stress period: 2 Time step: 39\n Solving: Stress period: 2 Time step: 40\n Solving: Stress period: 2 Time step: 41\n Solving: Stress period: 2 Time step: 42\n Solving: Stress period: 2 Time step: 43\n Solving: Stress period: 2 Time step: 44\n Solving: Stress period: 2 Time step: 45\n Solving: Stress period: 2 Time step: 46\n Solving: Stress period: 2 Time step: 47\n Solving: Stress period: 2 Time step: 48\n Solving: Stress period: 2 Time step: 49\n Solving: Stress period: 2 Time step: 50\n Solving: Stress period: 2 Time step: 51\n Solving: Stress period: 2 Time step: 52\n Solving: Stress period: 2 Time step: 53\n Solving: Stress period: 2 Time step: 54\n Solving: Stress period: 2 Time step: 55\n Solving: Stress period: 2 Time step: 56\n Solving: Stress period: 2 Time step: 57\n Solving: Stress period: 2 Time step: 58\n Solving: Stress period: 2 Time step: 59\n Solving: Stress period: 2 Time step: 60\n Solving: Stress period: 2 Time step: 61\n Solving: Stress period: 2 Time step: 62\n Solving: Stress period: 2 Time step: 63\n Solving: Stress period: 2 Time step: 64\n Solving: Stress period: 2 Time step: 65\n Solving: Stress period: 2 Time step: 66\n 
Solving: Stress period: 2 Time step: 67\n Solving: Stress period: 2 Time step: 68\n Solving: Stress period: 2 Time step: 69\n Solving: Stress period: 2 Time step: 70\n Solving: Stress period: 2 Time step: 71\n Solving: Stress period: 2 Time step: 72\n Solving: Stress period: 2 Time step: 73\n Solving: Stress period: 2 Time step: 74\n Solving: Stress period: 2 Time step: 75\n Solving: Stress period: 2 Time step: 76\n Solving: Stress period: 2 Time step: 77\n Solving: Stress period: 2 Time step: 78\n Solving: Stress period: 2 Time step: 79\n Solving: Stress period: 2 Time step: 80\n Solving: Stress period: 2 Time step: 81\n Solving: Stress period: 2 Time step: 82\n Solving: Stress period: 2 Time step: 83\n Solving: Stress period: 2 Time step: 84\n Solving: Stress period: 2 Time step: 85\n Solving: Stress period: 2 Time step: 86\n Solving: Stress period: 2 Time step: 87\n Solving: Stress period: 2 Time step: 88\n Solving: Stress period: 2 Time step: 89\n Solving: Stress period: 2 Time step: 90\n Solving: Stress period: 2 Time step: 91\n Solving: Stress period: 2 Time step: 92\n Solving: Stress period: 2 Time step: 93\n Solving: Stress period: 2 Time step: 94\n Solving: Stress period: 2 Time step: 95\n Solving: Stress period: 2 Time step: 96\n Solving: Stress period: 2 Time step: 97\n Solving: Stress period: 2 Time step: 98\n Solving: Stress period: 2 Time step: 99\n Solving: Stress period: 2 Time step: 100\n Solving: Stress period: 2 Time step: 101\n Solving: Stress period: 2 Time step: 102\n Solving: Stress period: 2 Time step: 103\n Solving: Stress period: 2 Time step: 104\n Solving: Stress period: 2 Time step: 105\n Solving: Stress period: 2 Time step: 106\n Solving: Stress period: 2 Time step: 107\n Solving: Stress period: 2 Time step: 108\n Solving: Stress period: 2 Time step: 109\n Solving: Stress period: 2 Time step: 110\n Solving: Stress period: 2 Time step: 111\n Solving: Stress period: 2 Time step: 112\n Solving: Stress period: 2 Time step: 113\n Solving: 
Stress period: 2 Time step: 114\n Solving: Stress period: 2 Time step: 115\n Solving: Stress period: 2 Time step: 116\n Solving: Stress period: 2 Time step: 117\n Solving: Stress period: 2 Time step: 118\n Solving: Stress period: 2 Time step: 119\n Solving: Stress period: 2 Time step: 120\n Solving: Stress period: 3 Time step: 1\n Solving: Stress period: 3 Time step: 2\n Solving: Stress period: 3 Time step: 3\n Solving: Stress period: 3 Time step: 4\n Solving: Stress period: 3 Time step: 5\n Solving: Stress period: 3 Time step: 6\n Solving: Stress period: 3 Time step: 7\n Solving: Stress period: 3 Time step: 8\n Solving: Stress period: 3 Time step: 9\n Solving: Stress period: 3 Time step: 10\n Solving: Stress period: 3 Time step: 11\n Solving: Stress period: 3 Time step: 12\n Solving: Stress period: 3 Time step: 13\n Solving: Stress period: 3 Time step: 14\n Solving: Stress period: 3 Time step: 15\n Solving: Stress period: 3 Time step: 16\n Solving: Stress period: 3 Time step: 17\n Solving: Stress period: 3 Time step: 18\n Solving: Stress period: 3 Time step: 19\n Solving: Stress period: 3 Time step: 20\n Solving: Stress period: 3 Time step: 21\n Solving: Stress period: 3 Time step: 22\n Solving: Stress period: 3 Time step: 23\n Solving: Stress period: 3 Time step: 24\n Solving: Stress period: 3 Time step: 25\n Solving: Stress period: 3 Time step: 26\n Solving: Stress period: 3 Time step: 27\n Solving: Stress period: 3 Time step: 28\n Solving: Stress period: 3 Time step: 29\n Solving: Stress period: 3 Time step: 30\n Solving: Stress period: 3 Time step: 31\n Solving: Stress period: 3 Time step: 32\n Solving: Stress period: 3 Time step: 33\n Solving: Stress period: 3 Time step: 34\n Solving: Stress period: 3 Time step: 35\n Solving: Stress period: 3 Time step: 36\n Solving: Stress period: 3 Time step: 37\n Solving: Stress period: 3 Time step: 38\n Solving: Stress period: 3 Time step: 39\n Solving: Stress period: 3 Time step: 40\n Solving: Stress period: 3 Time step: 
41\n Solving: Stress period: 3 Time step: 42\n Solving: Stress period: 3 Time step: 43\n Solving: Stress period: 3 Time step: 44\n Solving: Stress period: 3 Time step: 45\n Solving: Stress period: 3 Time step: 46\n Solving: Stress period: 3 Time step: 47\n Solving: Stress period: 3 Time step: 48\n Solving: Stress period: 3 Time step: 49\n Solving: Stress period: 3 Time step: 50\n Solving: Stress period: 3 Time step: 51\n Solving: Stress period: 3 Time step: 52\n Solving: Stress period: 3 Time step: 53\n Solving: Stress period: 3 Time step: 54\n Solving: Stress period: 3 Time step: 55\n Solving: Stress period: 3 Time step: 56\n Solving: Stress period: 3 Time step: 57\n Solving: Stress period: 3 Time step: 58\n Solving: Stress period: 3 Time step: 59\n Solving: Stress period: 3 Time step: 60\n Solving: Stress period: 3 Time step: 61\n Solving: Stress period: 3 Time step: 62\n Solving: Stress period: 3 Time step: 63\n Solving: Stress period: 3 Time step: 64\n Solving: Stress period: 3 Time step: 65\n Solving: Stress period: 3 Time step: 66\n Solving: Stress period: 3 Time step: 67\n Solving: Stress period: 3 Time step: 68\n Solving: Stress period: 3 Time step: 69\n Solving: Stress period: 3 Time step: 70\n Solving: Stress period: 3 Time step: 71\n Solving: Stress period: 3 Time step: 72\n Solving: Stress period: 3 Time step: 73\n Solving: Stress period: 3 Time step: 74\n Solving: Stress period: 3 Time step: 75\n Solving: Stress period: 3 Time step: 76\n Solving: Stress period: 3 Time step: 77\n Solving: Stress period: 3 Time step: 78\n Solving: Stress period: 3 Time step: 79\n Solving: Stress period: 3 Time step: 80\n Solving: Stress period: 3 Time step: 81\n Solving: Stress period: 3 Time step: 82\n Solving: Stress period: 3 Time step: 83\n Solving: Stress period: 3 Time step: 84\n Solving: Stress period: 3 Time step: 85\n Solving: Stress period: 3 Time step: 86\n Solving: Stress period: 3 Time step: 87\n Solving: Stress period: 3 Time step: 88\n Solving: Stress 
period: 3 Time step: 89\n Solving: Stress period: 3 Time step: 90\n Solving: Stress period: 3 Time step: 91\n Solving: Stress period: 3 Time step: 92\n Solving: Stress period: 3 Time step: 93\n Solving: Stress period: 3 Time step: 94\n Solving: Stress period: 3 Time step: 95\n Solving: Stress period: 3 Time step: 96\n Solving: Stress period: 3 Time step: 97\n Solving: Stress period: 3 Time step: 98\n Solving: Stress period: 3 Time step: 99\n Solving: Stress period: 3 Time step: 100\n Solving: Stress period: 3 Time step: 101\n"
]
],
[
[
"### Post-Process Head Results\n\nPost-processing MODFLOW 6 results is still a work in progress. There aren't any Flopy plotting functions built in yet, like they are for other MODFLOW versions. So we need to plot the results using general Flopy capabilities. We can also use some of the Flopy ModelMap capabilities for MODFLOW 6, but in order to do so, we need to manually create a SpatialReference object, that is needed for the plotting. Examples of both approaches are shown below.\n\nFirst, a link to the heads file is created with `HeadFile`. The link can then be accessed with the `get_data` function, by specifying, in this case, the step number and period number for which we want to retrieve data. A three-dimensional array is returned of size `nlay, nrow, ncol`. Matplotlib contouring functions are used to make contours of the layers or a cross-section.",
"_____no_output_____"
]
],
[
[
"# Read the binary head file and plot the results\n# We can use the existing Flopy HeadFile class because\n# the format of the headfile for MODFLOW 6 is the same\n# as for previous MODFLOW verions\nheadfile = '{}.hds'.format(model_name)\nfname = os.path.join(workspace, headfile)\nhds = flopy.utils.binaryfile.HeadFile(fname)\nh = hds.get_data()",
"_____no_output_____"
],
[
"# We can also use the Flopy model map capabilities for MODFLOW 6\n# but in order to do so, we need to manually create a\n# SpatialReference object\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(1, 1, 1, aspect='equal')\nsr = flopy.utils.reference.SpatialReference(delr=dis.delr[:],\n delc=dis.delc[:])\n\n# Next we create an instance of the ModelMap class\nmodelmap = flopy.plot.ModelMap(sr=sr)\n\n# Then we can use the plot_grid() method to draw the grid\n# The return value for this function is a matplotlib LineCollection object,\n# which could be manipulated (or used) later if necessary.\n#quadmesh = modelmap.plot_ibound(ibound=ibd)\nlinecollection = modelmap.plot_grid()\ncontours = modelmap.contour_array(h[0])",
"_____no_output_____"
]
],
[
[
"### Post-Process Flows\n\nMODFLOW 6 writes a binary grid file, which contains information about the model grid. MODFLOW 6 also writes a binary budget file, which contains flow information. Both of these files can be read using Flopy capabilities. The MfGrdFile class in Flopy can be used to read the binary grid file. The CellBudgetFile class in Flopy can be used to read the binary budget file written by MODFLOW 6.",
"_____no_output_____"
]
],
[
[
"# read the binary grid file\nfname = os.path.join(workspace, '{}.dis.grb'.format(model_name))\nbgf = flopy.utils.mfgrdfile.MfGrdFile(fname)\n\n# data read from the binary grid file is stored in a dictionary\nbgf._datadict",
"_____no_output_____"
],
[
"# Information from the binary grid file is easily retrieved\nia = bgf._datadict['IA'] - 1\nja = bgf._datadict['JA'] - 1",
"_____no_output_____"
],
[
"# read the cell budget file\nfname = os.path.join(workspace, '{}.cbb'.format(model_name))\ncbb = flopy.utils.CellBudgetFile(fname, precision='double')\n#cbb.list_records()\n\nflowja = cbb.get_data(text='FLOW-JA-FACE')[0][0, 0, :]",
"_____no_output_____"
],
[
"# By having the ia and ja arrays and the flow-ja-face we can look at\n# the flows for any cell and process them in the follow manner.\nk = 2; i = 7; j = 7\ncelln = k * nrow * ncol + i * nrow + j\nprint('Printing flows for cell {}'.format(celln + 1))\nfor ipos in range(ia[celln] + 1, ia[celln + 1]):\n cellm = ja[ipos] # change from one-based to zero-based\n print('Cell {} flow with cell {} is {}'.format(celln + 1, cellm + 1, flowja[ipos]))",
"Printing flows for cell 413\nCell 413 flow with cell 263 is 251.46262091207714\nCell 413 flow with cell 403 is 0.7176346498604858\nCell 413 flow with cell 412 is 439.86299685437785\nCell 413 flow with cell 414 is -693.4212447574212\nCell 413 flow with cell 423 is 1.3779378787791075\n"
],
[
"fname = 'head-hydrographs.csv'\nfname = os.path.join(workspace, fname)\ncsv = np.genfromtxt(fname, delimiter=',', dtype=None, names=True)\nfor name in csv.dtype.names[1:]:\n plt.plot(csv['time'], csv[name], label=name)\nplt.legend()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e79ccd64ba6f5d11b7bbe18964e6d6090e37731b | 81,141 | ipynb | Jupyter Notebook | jupyter_notebooks/machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | 2 | 2021-02-13T05:52:05.000Z | 2022-02-08T09:52:35.000Z | machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | null | null | null | machine_learning/ebook_mastering_ml_in_6_steps/Chapter_5_Code/Code/Document_Clustering.ipynb | manual123/Nacho-Jupyter-Notebooks | e75523434b1a90313a6b44e32b056f63de8a7135 | [
"MIT"
] | null | null | null | 266.911184 | 58,042 | 0.917255 | [
[
[
"### Clustering text documents using k-means\n\nAs an example we'll be using the 20 newsgroups dataset consists of 18000+ newsgroup posts on 20 topics. You can learn more about the dataset at http://qwone.com/~jason/20Newsgroups/",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Load data\n\n",
"_____no_output_____"
]
],
[
[
"newsgroups_train = fetch_20newsgroups(subset='train')\nprint(list(newsgroups_train.target_names))",
"['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc']\n"
]
],
[
[
"To keep it simple, let's filter only 3 topics. Assume that we do not know the topics, let's run clustering algorithm and examine the keywords of each clusters",
"_____no_output_____"
]
],
[
[
"categories = ['alt.atheism', 'comp.graphics', 'rec.motorcycles']\n\ndataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=2017)\n\nprint(\"%d documents\" % len(dataset.data))\nprint(\"%d categories\" % len(dataset.target_names))\n\nlabels = dataset.target\n\nprint(\"Extracting features from the dataset using a sparse vectorizer\")\nvectorizer = TfidfVectorizer(stop_words='english')\nX = vectorizer.fit_transform(dataset.data)\n\nprint(\"n_samples: %d, n_features: %d\" % X.shape)",
"2768 documents\n3 categories\nExtracting features from the dataset using a sparse vectorizer\nn_samples: 2768, n_features: 35311\n"
]
],
[
[
"### LSA via SVD\n\nLatent Semantic Analysis (LSA) is a mathematical method that tries to bring out latent relationships within a collection of documents. Rather than looking at each document isolated from the others it looks at all the documents as a whole and the terms within them to identify relationships. Let's perform LSA by running SVD on the data to reduce the dimensionality. \n\nSVD of matrix A = U * ∑ * VT\n\n* r = rank of matrix X\n* U = column orthonormal m * r matrix\n* ∑ = diagonal r * r matrix with singular value sorted in descending order\n* V = column orthonormal r * n matrix\n\nIn our case we have 3 topics, 2768 documents and 35311 word vocabulary. \n\n* Original matrix = 2768*35311 ~ 10^8\n* SVD = 3*2768 + 3 + 3*35311 ~ 10^5.3\n\n##### Resulted SVD is taking approximately 460 times less space than original matrix.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(filename='../Chapter 5 Figures/SVD.png', width=500)",
"_____no_output_____"
],
[
"from sklearn.decomposition import TruncatedSVD\n\n# Lets reduce the dimensionality to 2000\nsvd = TruncatedSVD(2000)\nlsa = make_pipeline(svd, Normalizer(copy=False))\n\nX = lsa.fit_transform(X)\n\nexplained_variance = svd.explained_variance_ratio_.sum()\nprint(\"Explained variance of the SVD step: {}%\".format(int(explained_variance * 100)))",
"Explained variance of the SVD step: 95%\n"
]
],
[
[
"### k-means clustering",
"_____no_output_____"
]
],
[
[
"from __future__ import print_function\n\nkm = KMeans(n_clusters=3, init='k-means++', max_iter=100, n_init=1)\n\n# Scikit learn provides MiniBatchKMeans to run k-means in batch mode suitable for a very large corpus\n# km = MiniBatchKMeans(n_clusters=5, init='k-means++', n_init=1, init_size=1000, batch_size=1000)\n\nprint(\"Clustering sparse data with %s\" % km)\nkm.fit(X)\n\nprint(\"Top terms per cluster:\")\noriginal_space_centroids = svd.inverse_transform(km.cluster_centers_)\norder_centroids = original_space_centroids.argsort()[:, ::-1]\n \nterms = vectorizer.get_feature_names()\nfor i in range(3):\n print(\"Cluster %d:\" % i, end='')\n for ind in order_centroids[i, :10]:\n print(' %s' % terms[ind], end='')\n print()",
"Clustering sparse data with KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=100,\n n_clusters=3, n_init=1, n_jobs=1, precompute_distances='auto',\n random_state=None, tol=0.0001, verbose=0)\nTop terms per cluster:\nCluster 0: edu graphics university god subject lines organization com posting uk\nCluster 1: com bike edu dod ca writes article sun like organization\nCluster 2: keith sgi livesey caltech com solntze wpd jon edu sandvik\n"
]
],
[
[
"### Hierarchical clustering",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics.pairwise import cosine_similarity\ndist = 1 - cosine_similarity(X)",
"_____no_output_____"
],
[
"from scipy.cluster.hierarchy import ward, dendrogram\n\nlinkage_matrix = ward(dist) #define the linkage_matrix using ward clustering pre-computed distances\n\nfig, ax = plt.subplots(figsize=(8, 8)) # set size\nax = dendrogram(linkage_matrix, orientation=\"right\")\n\nplt.tick_params(axis= 'x', which='both', bottom='off', top='off', labelbottom='off')\n\nplt.tight_layout() #show plot with tight layout\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79cd08d9f9f67ddb63fb360f327b1d8f619b0ec | 4,161 | ipynb | Jupyter Notebook | tests/nb/test_draw_protein.ipynb | jparkhill/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 55 | 2016-07-21T23:25:59.000Z | 2022-02-14T01:04:49.000Z | tests/nb/test_draw_protein.ipynb | jparkhill/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 40 | 2016-07-26T20:57:04.000Z | 2021-09-06T02:31:52.000Z | tests/nb/test_draw_protein.ipynb | Autodesk/notebook-molecular-visualization | 2dd61fedcf363d7362b727669b86c5f1c07656fd | [
"Apache-2.0"
] | 18 | 2016-07-25T21:49:02.000Z | 2020-10-03T11:17:03.000Z | 22.491892 | 106 | 0.542898 | [
[
[
"#! setup\nimport os\nimport moldesign as mdt\nfrom IPython.display import display\n\nprotease = mdt.read(os.path.join(mdt.__path__[0], '_tests', 'data', '3aid.pdb'))",
"_____no_output_____"
],
[
"#! test_draw_3aid\nprotease.draw(width='600px', height=600)",
"_____no_output_____"
],
[
"# ! test_color_by_list\nviewer = protease.draw(style='stick')\nviewer.color_by([atom.index for atom in protease.atoms])\nviewer",
"_____no_output_____"
],
[
"#! test_color_by_lambda\nviewer = protease.draw()\nviewer.color_by(lambda atom: atom.chain)\nviewer",
"_____no_output_____"
],
[
"#! test_spheres\nviewer = protease.draw()\nviewer.spheres()\nviewer",
"_____no_output_____"
],
[
"#! test_lines\nviewer = protease.draw()\nviewer.line()\nviewer.color_by(lambda atom: sum(n.num_bonds for n in atom.bonded_atoms))\nviewer",
"_____no_output_____"
],
[
"#! test_set_color\n#! fixture: colors_set\nviewer = protease.draw()\nviewer.set_color('hotpink', atoms=[atom for atom in protease.atoms[:1000] if atom.atnum == 6])\nviewer.lines()\nviewer",
"_____no_output_____"
],
[
"#! test_unset_color\n#! with_fixture: colors_set\ndisplay(viewer)\nviewer.sticks()\nviewer.unset_color(protease.atoms[:800])",
"_____no_output_____"
],
[
"#! test_color_by_string\nviewer = protease.draw()\nviewer.color_by('residue')\nviewer",
"_____no_output_____"
],
[
"#! test_sphere_subset\nviewer = protease.draw()\nviewer.sphere(atoms=protease.atoms[:100], color='purple', opacity=0.8, radius=3.0)\nviewer",
"_____no_output_____"
],
[
"#! test_protease_label\nviewer = protease.draw()\nviewer.draw_label(protease.residues[-1].com, text=protease.residues[0].name)\nviewer.draw_label(protease.com, text='Center of mass', fontsize=12, opacity=1.0, background=None)\nfor residue in protease.residues:\n if residue.type != 'protein':\n viewer.draw_label(residue.com, text=residue.resname,\n background='red', color='blue', fontsize=22, opacity=0.5)\nviewer",
"_____no_output_____"
],
[
"#! test_clipping_planes\nviewer = protease.draw3d(display=True)\nviewer.near_clip=-1.5\nviewer.far_clip=1.5",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79cdbb5c289277d364cd09136a4843acd4dca75 | 63,557 | ipynb | Jupyter Notebook | FAI_old/lesson2/lesson2_codealong.ipynb | WNoxchi/Kawkasos | 42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2 | [
"MIT"
] | 7 | 2017-07-28T06:17:29.000Z | 2021-03-19T08:43:07.000Z | FAI_old/lesson2/lesson2_codealong.ipynb | WNoxchi/Kawkasos | 42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2 | [
"MIT"
] | null | null | null | FAI_old/lesson2/lesson2_codealong.ipynb | WNoxchi/Kawkasos | 42c5070a8fa4a5e2d6386dc19d385e82a1d73fb2 | [
"MIT"
] | 1 | 2018-06-17T12:08:25.000Z | 2018-06-17T12:08:25.000Z | 40.872669 | 575 | 0.551898 | [
[
[
" 10 May 2017 - Lecture 2 JNB Code Along - WH Nixalo\n\n[Notebook](https://github.com/fastai/courses/blob/ed1fb08d86df277d2736972a1ff1ac39ea1ac733/deeplearning1/nbs/lesson2.ipynb) | Lecture[1:20:00](https://www.youtube.com/watch?v=e3aM6XTekJc)\n## 1 Linear models with CNN features",
"_____no_output_____"
]
],
[
[
"# This is to point Python to my utils folder\nimport sys; import os\n# DIR = %pwd\nsys.path.insert(1, os.path.join('../utils'))\n\n# Rather than importing everything manually, we'll make things easy\n# and load them all in utils.py, and just import them from there.\nimport utils; reload(utils)\nfrom utils import *\n%matplotlib inline",
"Using Theano backend.\n"
]
],
[
[
"## 1.1 Intro\n\nWe need to find a way to convert the imagenet predictions to a probability of being a cat or a dog, since that is what the Kaggle copmetition requires us to submit. We could use the imagenet hierarchy to download a list of all the imagenet categories in each of the dog and cat groups, and could then solve our problem in various ways, such as:\n\n* Finding the largest probability that's either a cat or a dog, and using that label\n* Averaging the prbability of all the cat categories and comparing it to the average of all the dog categories.\n\nBut these approaches have some downsides:\n\n* They require manual coding for something that we should be able to learn from the data\n* They ignore information available in the predictions; for instance, if the models predict that there is a bone in th eimage, it's more likely to be a dog than a cat.\n\nA very simple solution to both of these problems is to learn a linear model that is trained using the 1,000 predictions from the imagenet model for each image as input, and the dog/cat label as target.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom __future__ import division, print_function\nimport os, json\nfrom glob import glob\nimport numpy as np\nimport scipy\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import confusion_matrix\nnp.set_printoptions(precision=4, linewidth=100)\nfrom matplotlib import pyplot as plt\nimport utils; reload(utils)\nfrom utils import plots, get_batches, plot_confusion_matrix, get_data",
"_____no_output_____"
],
[
"from numpy.random import random, permutation\nfrom scipy import misc, ndimage\nfrom scipy.ndimage.interpolation import zoom\n\nimport keras\nfrom keras import backend as K\nfrom keras.utils.data_utils import get_file\nfrom keras.models import Sequential\nfrom keras.layers import Input\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.preprocessing import image",
"_____no_output_____"
]
],
[
[
"## 1.2 Linear models in keras\n\nLet's forget the motivating example for a second a see how we can create a simple Linear model in Keras:\n\nEach of the ```Dense()``` layers is just a *linear* model, followed by a simple *activation function*.\n\nIn a linear model each row is calculated as ```sum(row * weights)```, where weights need to be learnt from the data & will be the same for every row. Let's create some data that we know is linearly related:",
"_____no_output_____"
]
],
[
[
"# we'll create a random matrix w/ 2 columns; & do a MatMul to get our \n# y value using a vector [2, 3] & adding a constant of 1.\nx = random((30, 2))\ny = np.dot(x, [2., 3.]) + 1.",
"_____no_output_____"
],
[
"x[:5]",
"_____no_output_____"
],
[
"y[:5]",
"_____no_output_____"
]
],
[
[
"We can use kears to create a simple linear model (```Dense()``` - with no activation - in Keras) and optimize it using SGD to minimize mean squared error.",
"_____no_output_____"
]
],
[
[
"# Keras calls the Linear Model \"Dense\"; aka. \"Fully-Connected\" in other \n# libraries.\n# So when we go 'Dense' w/ an input of 2 columns, & output of 1 col,\n# we're defining a linear model that can go from the 2 col array above, to \n# the 1 col output of y above.\n# Sequential() is a way of building multiple-layer networks. It takes an \n# array containing all the layers in your NN. A LM is a single Dense layer.\n# This automatically initializes the weights sensibly & calc derivatives.\n# We just tell it how to optimize the weights: SGD w/ LR=0.1, minz(MSE).\nlm = Sequential([Dense(1, input_shape=(2,))])\nlm.compile(optimizer=SGD(lr=0.1), loss='mse')",
"_____no_output_____"
],
[
"# find out our loss function w random weights\nlm.evaluate(x, y, verbose=0)",
"_____no_output_____"
],
[
"# now run SGD for 5 epochs & watch the loss improve\n# lm.fit(..) does the solving\nlm.fit(x, y, nb_epoch=5, batch_size=1)",
"Epoch 1/5\n30/30 [==============================] - 0s - loss: 1.6037 \nEpoch 2/5\n30/30 [==============================] - 0s - loss: 0.1901 \nEpoch 3/5\n30/30 [==============================] - 0s - loss: 0.1220 \nEpoch 4/5\n30/30 [==============================] - 0s - loss: 0.0789 \nEpoch 5/5\n30/30 [==============================] - 0s - loss: 0.0431 \n"
],
[
"# now evaluate and see the improvement:\nlm.evaluate(x, y, verbose=0)",
"_____no_output_____"
],
[
"# take a look at the weights, they should be virt. equal to 2, 3, and 1:\nlm.get_weights()",
"_____no_output_____"
],
[
"# so let's run another 5 epochs and see if this improves things:\nlm.fit(x, y, nb_epoch=5, batch_size=1)\nlm.evaluate(x, y, verbose=0)",
"Epoch 1/5\n30/30 [==============================] - 0s - loss: 0.0296 \nEpoch 2/5\n30/30 [==============================] - 0s - loss: 0.0194 \nEpoch 3/5\n30/30 [==============================] - 0s - loss: 0.0114 \nEpoch 4/5\n30/30 [==============================] - 0s - loss: 0.0082 \nEpoch 5/5\n30/30 [==============================] - 0s - loss: 0.0055 \n"
],
[
"# and take a look at the new weights:\nlm.get_weights()",
"_____no_output_____"
]
],
[
[
"Above is everything Keras is doing behind the scenes.\nSo, if we pass multiple layers to Keras via ```Sequential(..)```, we can start to build & optimize Deep Neural Networks.\n\nBefore that, we can still use the single-layer LM to create a pretty decent entry to the dogs-vs-cats Kaggle competition.",
"_____no_output_____"
],
[
"## 1.3 Train Linear Model on Predictions\n\nForgetting finetuning -- how do we take the output of an ImageNet network and as simply as possible, create a a good entry to the cats-vs-dogs competition? -- Our current ImageNet network returns a thousand probabilities but we need just cat vs dog. We don't want to manually write code to roll of the hierarchy into cats/dogs.\n\nSo what we can do is learn a Linear Model that takes the output of the ImageNet model, all it's 1000 predictions, and uses that as input, and uses the dog/cat label as the target -- and that LM would solve our problem.",
"_____no_output_____"
],
[
"### 1.3.1 Training the model\n\nWe start with some basic config steps. We copy a small amount of our data into a 'sample' directory, with the exact same structure as our 'train' directory -- this is *always* a good idea in *all* Machine Learning, since we should do all of our initial testing using a dataset small enough that we never have to wait for it.",
"_____no_output_____"
]
],
[
[
"# setup the directories\nos.mkdir('data')\nos.mkdir('data/dogscats')\n\npath = \"data/dogscats/\"\nmodel_path = path + 'models/'\n# if the path to our models DNE, make it\nif not os.path.exists(model_path): os.mkdir(model_path)\n# NOTE: os.mkdir(..) only works for a single folder\n# Also will throw error if dir already exists",
"_____no_output_____"
]
],
[
[
"We'll process as many images at a time as we can. This is a case of T&E to find the max batch size that doesn't cause a memory error.",
"_____no_output_____"
]
],
[
[
"batch_size = 100",
"_____no_output_____"
]
],
[
[
"We need to start with our VGG 16 model, since we're using its predictions & features",
"_____no_output_____"
]
],
[
[
"from vgg16 import Vgg16\nvgg = Vgg16()\nmodel = vgg.model",
"_____no_output_____"
]
],
[
[
"Our overall approach here will be:\n1. Get the true labels for every image\n2. Get the 1,000 ImageNet category predictions for every image\n3. Feed these predictions as input to a simple linear model.\nLet's start by grabbing training and validation batches.\n\n(so that's a thousand floats for every image)\n\nuse an output of 2 as input to LM\n\noutput of 1 as target to our LM, create LM & build predictions\n\nAs usual, we start by creating our batches & validation vatches",
"_____no_output_____"
]
],
[
[
"# Use batch size of 1 since we're just doing preprocessing on the CPU\nval_batches = get_batches(path + 'valid', shuffle=False, batch_size=1)\nbatches = get_batches(path + 'train', shuffle=False, batch_size=1)",
"Found 50 images belonging to 2 classes.\nFound 352 images belonging to 2 classes.\n"
]
],
[
[
"Getting the 1,000 categories for each image will take a long time & there's no reason to do it again & again. So after we do it the first time, let's save the resulting arrays.",
"_____no_output_____"
]
],
[
[
"import bcolz\ndef save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()\ndef load_array(fname): return bcolz.open(fname)[:]",
"_____no_output_____"
]
],
[
[
"It's also time consuming to convert all the images in the 224x224 format VGG 16 expects. So ```get_data``` will also store a Numpy array of the results of that conversion.",
"_____no_output_____"
]
],
[
[
"# ?? shows you the source code\n??get_data",
"_____no_output_____"
],
[
"val_data = get_data(path + 'valid')\ntrn_data = get_data(path + 'train')",
"Found 50 images belonging to 2 classes.\nFound 352 images belonging to 2 classes.\n"
],
[
"# so what the above does is createa a Numpy array with our full set of\n# training images -- 352 imgs, ea. of which is 3 colors, and 224x224\ntrn_data.shape",
"_____no_output_____"
],
[
"save_array(model_path + 'train_data.bc', trn_data)\nsave_array(model_path + 'valid_data.bc', val_data)",
"_____no_output_____"
]
],
[
[
"& Now we can load our training & validation data layer without recalculating them",
"_____no_output_____"
]
],
[
[
"trn_data = load_array(model_path + 'train_data.bc')\nval_data = load_array(model_path + 'valid_data.bc')",
"_____no_output_____"
],
[
"val_data.shape # our 50 validatn imgs",
"_____no_output_____"
]
],
[
[
"Most Deep Learning is done w/ One-Hot Encoding: prediction = 1, all other classes = 0; & Keras expects labels in a very specific format. Example of One Hot Encoding:\n```\nClass: 1Ht Enc:\n 0 100\n 1 010\n 2 001\n 1 010\n 0 100\n```\n1Ht Encoding is used because you can perform a MatMul since the num. weights == encoding length. In the above example W would be a vector of: ```w1, w2, w3```\n\nThis lets you do Deep Learning very easily with categorical variables\n\nKeras returns *classes* as a single column, so we convert to 1Ht.",
"_____no_output_____"
]
],
[
[
"def onehot(x): return np.array(OneHotEncoder().fit_transform(x.reshape(-1, 1)).todense())",
"_____no_output_____"
],
[
"# So, next thing we want to do is grab our labels and One-Hot Encode them\nval_classes = val_batches.classes\ntrn_classes = batches.classes\nval_labels = onehot(val_classes)\ntrn_labels = onehot(trn_classes)",
"_____no_output_____"
],
[
"trn_classes.shape # Keras single col of all imgs",
"_____no_output_____"
],
[
"trn_labels.shape # One-Hot Encoded: 2 bit-width col <--> 2 classes",
"_____no_output_____"
],
[
"trn_classes[:4] # taking a look at 1st 4 classes",
"_____no_output_____"
],
[
"trn_labels[:4] # seeing the 1st 4 labels are 1Ht encoded",
"_____no_output_____"
]
],
[
[
"Now we can finally do Step No.2: get the 1,000 ImageNet categ. preds for every image. Keras makes this easy for us. We can simple call ```model.predict(..)``` and pass in our data",
"_____no_output_____"
]
],
[
[
"trn_features = model.predict(trn_data, batch_size=batch_size)\nval_features = model.predict(val_data, batch_size=batch_size)",
"_____no_output_____"
],
[
"trn_features.shape # we can see it is indeed No. imgs x 1000 categories",
"_____no_output_____"
],
[
"# let's take a look at one of the images (displaying all its categs)\ntrn_features[0]",
"_____no_output_____"
]
],
[
[
"Not surprisingly, nearly all of these numbers are near zero.\n\nNow we can define our linear model, just like we did earlier; now that we have our 1000 features for each image\n\n",
"_____no_output_____"
]
],
[
[
"# 1000 inputs, since those're the saved features, and 2 outputs: dog & cat\nlm = Sequential([Dense(2, activation='softmax', input_shape=(1000,))])\nlm.compile(optimizer=RMSprop(lr=0.1), loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"& Now we're ready to fit the model! RMSprop is somewhat better than SGD. It's a minor tweak on SGD that tends to be much faster.",
"_____no_output_____"
]
],
[
[
"batch_size=4",
"_____no_output_____"
],
[
"lm.fit(trn_features, trn_labels, batch_size=batch_size, nb_epoch=3, \n validation_data = (val_features, val_labels))",
"Train on 352 samples, validate on 50 samples\nEpoch 1/3\n352/352 [==============================] - 0s - loss: 0.1856 - acc: 0.9375 - val_loss: 0.1782 - val_acc: 0.9000\nEpoch 2/3\n352/352 [==============================] - 0s - loss: 0.0682 - acc: 0.9688 - val_loss: 0.1416 - val_acc: 0.9200\nEpoch 3/3\n352/352 [==============================] - 0s - loss: 0.0517 - acc: 0.9773 - val_loss: 0.1161 - val_acc: 0.9200\n"
],
[
"# let's have a look at our model\nlm.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\ndense_6 (Dense) (None, 2) 2002 dense_input_3[0][0] \n====================================================================================================\nTotal params: 2,002\nTrainable params: 2,002\nNon-trainable params: 0\n____________________________________________________________________________________________________\n"
]
],
[
[
"So it ran almost instantly because running 3 epochs on a single layer with 2000 is really quick for my little i5 MacBook :3\n\nWe got an accuracy of ```.92```. Let's run another 3 epochs and see if this changes:",
"_____no_output_____"
]
],
[
[
"lm.fit(trn_features, trn_labels, batch_size=batch_size, nb_epoch=3,\n validation_data = (val_features, val_labels))",
"Train on 352 samples, validate on 50 samples\nEpoch 1/3\n352/352 [==============================] - 0s - loss: 0.0267 - acc: 0.9915 - val_loss: 0.2057 - val_acc: 0.9000\nEpoch 2/3\n352/352 [==============================] - 0s - loss: 0.0266 - acc: 0.9886 - val_loss: 0.2279 - val_acc: 0.9000\nEpoch 3/3\n352/352 [==============================] - 0s - loss: 0.0221 - acc: 0.9915 - val_loss: 0.2109 - val_acc: 0.9400\n"
]
],
[
[
"(I actually ran 9, bc on a tiny set of 350 images it took a bit more to improve: no change on the 1st, dropped to ```.90``` on the 2nd, and finally up to ```.94``` on the final)\n\nHere we haven't done any finetuning. All we did was take the ImageNet model of predictions, and built a model that maps from those predictions to either 'Cat' or 'Dog'\n\nThis is actually what most amatuer Machine Learning researchers do. They take a pretrained model, they grab the outputs, stick it into a linear model -- and it actually often works pretty well!\n\nTo get this 94% accuracy, we haven't done used any magical libraries at all. We just grabbed our batches up, we turned the images into a Numpy array, we took the Numpy array and ran ```model.predict(..)``` on them, we grabbed our labels and One-Hot Encoded them, and finally we took the 1Ht Enc labels and the 1,000 probabilities and fed them to a Linear Model with a thousand inputs and 2 outputs - and trained it and ended up with a validationa ccuracy of ```0.9400```",
"_____no_output_____"
],
[
"### 1.3.3 About Activation Functions\n\nThe last thing we're going to do is take this and turn it into a finetuning model. For that we need to understand activation functions. We've been looking at our Linear Model as a series of matrix multiplies. But a series of matrix multiplies is itself a matrix multiply --> a series of linear models is itself a linear model. Deep Learning must be doing something more than just this. At each stage (layer) it is putting the activations, the results of the previous layer, through a non-Linearity of some sort. ```tanh```, ```sigmoid```, ```max(0,x)``` (ReLU), etc.\n\nUsing the activation functions at each layer, we now have a genuine, modern (ca.2017), Deep Learning Neural Network. This kind of NN is capable of approximating any given function of arbitrary complexity.\n\nA series of matrix-multiplies & activation (sa. ReLU) is actually what's going on in a DLNN.\n\nRemember how we defined our model:\n\n```\nlm = Sequential([Dense(2, activation='softmax', input_shape(1000,))])\n```\n\nAnd the definition of a fully connected layer in the original VGG:\n\n```\nmodel.add(Dense(4096, activation='relu'))\n```\n\nWhat that ```activation``` parameter says is \"after you do the Matrix Π, do a activation of (in this case): ```max(0, x)```\"",
"_____no_output_____"
],
[
"## 2 Modifying the Model\n## 2.1 Retrain last layer's Linear Model\nSo what we need to do is take our final layer, which has a Matrix Multip and & activation function, and we're going to remove it. To understand why, take a look at our DLNN layers:",
"_____no_output_____"
]
],
[
[
"vgg.model.summary()",
"____________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n====================================================================================================\nlambda_1 (Lambda) (None, 3, 224, 224) 0 lambda_input_1[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_1 (ZeroPadding2D) (None, 3, 226, 226) 0 lambda_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_1 (Convolution2D) (None, 64, 224, 224) 1792 zeropadding2d_1[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_2 (ZeroPadding2D) (None, 64, 226, 226) 0 convolution2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_2 (Convolution2D) (None, 64, 224, 224) 36928 zeropadding2d_2[0][0] \n____________________________________________________________________________________________________\nmaxpooling2d_1 (MaxPooling2D) (None, 64, 112, 112) 0 convolution2d_2[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_3 (ZeroPadding2D) (None, 64, 114, 114) 0 maxpooling2d_1[0][0] \n____________________________________________________________________________________________________\nconvolution2d_3 (Convolution2D) (None, 128, 112, 112) 73856 zeropadding2d_3[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_4 (ZeroPadding2D) (None, 128, 114, 114) 0 convolution2d_3[0][0] \n____________________________________________________________________________________________________\nconvolution2d_4 (Convolution2D) (None, 128, 112, 112) 147584 zeropadding2d_4[0][0] 
\n____________________________________________________________________________________________________\nmaxpooling2d_2 (MaxPooling2D) (None, 128, 56, 56) 0 convolution2d_4[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_5 (ZeroPadding2D) (None, 128, 58, 58) 0 maxpooling2d_2[0][0] \n____________________________________________________________________________________________________\nconvolution2d_5 (Convolution2D) (None, 256, 56, 56) 295168 zeropadding2d_5[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_6 (ZeroPadding2D) (None, 256, 58, 58) 0 convolution2d_5[0][0] \n____________________________________________________________________________________________________\nconvolution2d_6 (Convolution2D) (None, 256, 56, 56) 590080 zeropadding2d_6[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_7 (ZeroPadding2D) (None, 256, 58, 58) 0 convolution2d_6[0][0] \n____________________________________________________________________________________________________\nconvolution2d_7 (Convolution2D) (None, 256, 56, 56) 590080 zeropadding2d_7[0][0] \n____________________________________________________________________________________________________\nmaxpooling2d_3 (MaxPooling2D) (None, 256, 28, 28) 0 convolution2d_7[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_8 (ZeroPadding2D) (None, 256, 30, 30) 0 maxpooling2d_3[0][0] \n____________________________________________________________________________________________________\nconvolution2d_8 (Convolution2D) (None, 512, 28, 28) 1180160 zeropadding2d_8[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_9 (ZeroPadding2D) (None, 512, 30, 30) 0 convolution2d_8[0][0] 
\n____________________________________________________________________________________________________\nconvolution2d_9 (Convolution2D) (None, 512, 28, 28) 2359808 zeropadding2d_9[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_10 (ZeroPadding2D) (None, 512, 30, 30) 0 convolution2d_9[0][0] \n____________________________________________________________________________________________________\nconvolution2d_10 (Convolution2D) (None, 512, 28, 28) 2359808 zeropadding2d_10[0][0] \n____________________________________________________________________________________________________\nmaxpooling2d_4 (MaxPooling2D) (None, 512, 14, 14) 0 convolution2d_10[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_11 (ZeroPadding2D) (None, 512, 16, 16) 0 maxpooling2d_4[0][0] \n____________________________________________________________________________________________________\nconvolution2d_11 (Convolution2D) (None, 512, 14, 14) 2359808 zeropadding2d_11[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_12 (ZeroPadding2D) (None, 512, 16, 16) 0 convolution2d_11[0][0] \n____________________________________________________________________________________________________\nconvolution2d_12 (Convolution2D) (None, 512, 14, 14) 2359808 zeropadding2d_12[0][0] \n____________________________________________________________________________________________________\nzeropadding2d_13 (ZeroPadding2D) (None, 512, 16, 16) 0 convolution2d_12[0][0] \n____________________________________________________________________________________________________\nconvolution2d_13 (Convolution2D) (None, 512, 14, 14) 2359808 zeropadding2d_13[0][0] \n____________________________________________________________________________________________________\nmaxpooling2d_5 (MaxPooling2D) (None, 512, 7, 7) 0 
convolution2d_13[0][0] \n____________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 25088) 0 maxpooling2d_5[0][0] \n____________________________________________________________________________________________________\ndense_3 (Dense) (None, 4096) 102764544 flatten_1[0][0] \n____________________________________________________________________________________________________\ndropout_1 (Dropout) (None, 4096) 0 dense_3[0][0] \n____________________________________________________________________________________________________\ndense_4 (Dense) (None, 4096) 16781312 dropout_1[0][0] \n____________________________________________________________________________________________________\ndropout_2 (Dropout) (None, 4096) 0 dense_4[0][0] \n____________________________________________________________________________________________________\ndense_5 (Dense) (None, 1000) 4097000 dropout_2[0][0] \n====================================================================================================\nTotal params: 138,357,544\nTrainable params: 138,357,544\nNon-trainable params: 0\n____________________________________________________________________________________________________\n"
]
],
[
[
"The last layer is a Dense (FC/Linear) layer. It doesn't make sense to add another dense layer atop of a dense layer that's already tuned to classify the 1,000 ImageNet categories. We'll remove it, and use the previous Dense layer with it's 4096 activations to find Cats & Dogs.\n\nWe do this by calling ```model.pop()``` to pop off the last layer, and set all remaining layers to be fixed, so they aren't altered.",
"_____no_output_____"
]
],
[
[
"model.pop()\nfor layer in model.layers: layer.trainable=False",
"_____no_output_____"
]
],
[
[
"Now we add our final Cat vs Dog layer",
"_____no_output_____"
]
],
[
[
"model.add(Dense(2, activation='softmax'))",
"_____no_output_____"
]
],
[
[
"To see what happened when we called ```vgg.finetune()``` earlier:\nBasically what it does is a ```model.pop()``` and a ```model.add(Dense(..))```",
"_____no_output_____"
]
],
[
[
"??vgg.finetune()",
"_____no_output_____"
]
],
[
[
"After we add our new final layer, we'll setup our batches to use preprocessed images (and we'll also *shuffle* the traiing batches to add more randomness when using multiple epochs):",
"_____no_output_____"
]
],
[
[
"gen = image.ImageDataGenerator()\nbatches = gen.flow(trn_data, trn_labels, batch_size=batch_size, shuffle=True)\nval_batches = gen.flow(val_data, val_labels, batch_size=batch_size, shuffle=False)",
"_____no_output_____"
]
],
[
[
"Now we have a model designed to classify Cats vs Dogs instead of the 1,000 ImageNet categories & THEN Cats vs Dogs. After this, everything is done the same as before. Compile the model & choose optimizer, fit the model (btw, whenever we work with batches in Keras, we'll be using ```model.function_generator(..)``` instead of ```model.function(..)```\n\nSo let's do that and see what we get after 2 epochs of training:\nWe'll also define a function for fitting models to save time typing.",
"_____no_output_____"
]
],
[
[
"# NOTE: now use batches.n instead of batches.N\ndef fit_model(model, batches, val_batches, nb_epoch=1):\n model.fit_generator(batches, samples_per_epoch=batches.n, nb_epoch=nb_epoch,\n validation_data=val_batches, nb_val_samples=val_batches.n)",
"_____no_output_____"
]
],
[
[
"It'll run a bit slowly since it has to calculate all previous layers in order to know what input to pass to the new final layer. We can save time by precalculating the output of the penultimate layer, like we did for the final layer earlier. Note for later work.",
"_____no_output_____"
]
],
[
[
"# compile the new model\nopt = RMSprop(lr=0.1)\nmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])",
"_____no_output_____"
],
[
"# then fit it\nfit_model(model, batches, val_batches, nb_epoch=2)",
"Epoch 1/2\n352/352 [==============================] - 170s - loss: 1.4733 - acc: 0.8949 - val_loss: 1.6794 - val_acc: 0.8600\nEpoch 2/2\n352/352 [==============================] - 164s - loss: 1.5168 - acc: 0.9006 - val_loss: 0.3224 - val_acc: 0.9800\n"
]
],
[
[
"Note how little actual code was needed to finetune the model. Because this is such an important and common operation, Keras is set up to make it as easy as possible. Not external helper functions were needed.\n\nIt's a good idea to save weights of all your models, so you can re-use them later. Be sure to note the git log number of your model when keeping a research journal of your results.",
"_____no_output_____"
]
],
[
[
"model.save_weights(model_path + 'finetune1.h5')",
"_____no_output_____"
],
[
"# We can now use this as a good starting point for future Dogs v Cats models\nmodel.load_weights(model_path + 'finetune1.h5')",
"_____no_output_____"
],
[
"model.evaluate(val_data, val_labels)",
"50/50 [==============================] - 18s \n"
]
],
[
[
"Week 2 Assignments:\n\n**Take it further** -- now that you know what's going on with finetuning and linear layers -- think about everything you know: the evaluation function, the categorical cross entropy loss function, finetuning: and see if you can find ways to make your model better and see how high up the rankings in Kaggle you can get.\n\n**If you want to push yourself** -- see if you can do the same thing by writing all the code yourself. Don't use the class notebooks at all -- build it all from scratch.\n\n**If you want to go *Even* further** -- see if you can enter another Kaggle competition (Galaxy Zoo, Plankton, Statefarm Distracted Driver, etc)\n\n\n-- end of lecture 2 --\n\n10 May 2017 WNx",
"_____no_output_____"
],
[
"We can look at the earlier prediction examples visualizations by redefiing *probs* and *preds* and re-using our earlier code.",
"_____no_output_____"
]
],
[
[
"preds = model.predict_classes(val_data, batch_size=batch_size)\nprobs = model.predict_proba(val_data, batch_size=batch_size)[:,0]",
"_____no_output_____"
]
],
[
[
"### 2.2 Retraining more layers",
"_____no_output_____"
],
[
"### 2.2.1 An Introduction to back-propagation",
"_____no_output_____"
],
[
"### 2.2.2 Training multiple layers in Keras",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e79ce1fd63c19abbf4c41054d2352319d5e3897d | 10,409 | ipynb | Jupyter Notebook | notebooks/example_train_single_ESN.ipynb | anilozdemir/EchoVPR | c32274c8bf4dd8642f3efb99f837e3feff3287ae | [
"MIT"
] | 2 | 2021-11-08T16:34:57.000Z | 2021-11-18T06:51:48.000Z | notebooks/example_train_single_ESN.ipynb | anilozdemir/EchoVPR | c32274c8bf4dd8642f3efb99f837e3feff3287ae | [
"MIT"
] | null | null | null | notebooks/example_train_single_ESN.ipynb | anilozdemir/EchoVPR | c32274c8bf4dd8642f3efb99f837e3feff3287ae | [
"MIT"
] | 1 | 2021-11-10T20:49:32.000Z | 2021-11-10T20:49:32.000Z | 29.403955 | 250 | 0.498415 | [
[
[
"<a href=\"https://colab.research.google.com/github/anilozdemir/EchoVPR/blob/main/notebooks/example_train_single_ESN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Get EchoVPR from GitHub",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/anilozdemir/EchoVPR.git",
"Cloning into 'EchoVPR'...\nremote: Enumerating objects: 82, done.\u001b[K\nremote: Counting objects: 100% (82/82), done.\u001b[K\nremote: Compressing objects: 100% (60/60), done.\u001b[K\nremote: Total 82 (delta 33), reused 53 (delta 18), pack-reused 0\u001b[K\nUnpacking objects: 100% (82/82), done.\n"
]
],
[
[
"## Install `echovpr` module\n",
"_____no_output_____"
]
],
[
[
"%cd EchoVPR/src",
"/content/EchoVPR/src\n"
],
[
"!python setup.py develop",
"running develop\nrunning egg_info\ncreating echovpr.egg-info\nwriting echovpr.egg-info/PKG-INFO\nwriting dependency_links to echovpr.egg-info/dependency_links.txt\nwriting top-level names to echovpr.egg-info/top_level.txt\nwriting manifest file 'echovpr.egg-info/SOURCES.txt'\nwriting manifest file 'echovpr.egg-info/SOURCES.txt'\nrunning build_ext\nCreating /usr/local/lib/python3.7/dist-packages/echovpr.egg-link (link to .)\nAdding echovpr 1.0 to easy-install.pth file\n\nInstalled /content/EchoVPR/src\nProcessing dependencies for echovpr==1.0\nFinished processing dependencies for echovpr==1.0\n"
]
],
[
[
"# Train ESN and ESN+SpaRCe",
"_____no_output_____"
]
],
[
[
"import torch\n\nfrom echovpr.datasets import getHiddenRepr, DataSets, Tolerance\nfrom echovpr.networks import singleESN, getSparsity\nfrom echovpr.experiments import ESN_Exp, getValidationIndices",
"_____no_output_____"
]
],
[
[
"## Get NetVLAD Hidden Representation and Validation Indices",
"_____no_output_____"
]
],
[
[
"ds = 'GardensPoint'\ntol = Tolerance[ds]\n\n# Get NetVLAD Hidden Representation\nhiddenReprTrain, hiddenReprTest = getHiddenRepr('GardensPoint')\n# Get Input and Output Size; First element of shape: (number of images == number of classes == nOutput); Second element: size of hidden representations\nnOutput, nInput = hiddenReprTrain.shape \n# Get Validation Indices\nTestInd, ValidationInd = getValidationIndices(1, nOutput)",
"_____no_output_____"
]
],
[
[
"# ESN Training",
"_____no_output_____"
]
],
[
[
"nRes = 1000\nnCon = 10\nnTrial = 10\nnEpoch = 50\nnBatch = 5\nlR = 0.01\ngamma = 0.0003 \nalpha = 0.68",
"_____no_output_____"
],
[
"_ESN_model_ = lambda randSeed: singleESN(nInput, nOutput, nReservoir=nRes, randomSeed = randSeed, device='cpu', useReadout = False,\n sparsity = getSparsity(nCon, nRes), alpha = alpha, gamma = gamma, rho = 0.99)",
"_____no_output_____"
],
[
"model = _ESN_model_(0)\nexp = ESN_Exp(model, hiddenReprTrain, hiddenReprTest, TestInd, ValidationInd, tol)\nresults = exp.train_esn(nEpoch=nEpoch, lR=lR, nBatch=nBatch, returnData=True, returnDataAll=False)\nresults['AccTest'][-1].mean()",
"_____no_output_____"
]
],
[
[
"# ESN+SpaRCe Training",
"_____no_output_____"
]
],
[
[
"nRes = 1000\nnCon = 10\nnTrial = 10\nnEpoch = 50\nnBatch = 5\nlR = 0.01\ngamma = 0.0003 \nalpha = 0.74\nquantile = 0.4 \nlrDiv = 10",
"_____no_output_____"
],
[
"_SPARCE_model_ = lambda randSeed: singleESN(nInput, nOutput, nReservoir = nRes, randomSeed = randSeed, device='cpu', useReadout = False,\n sparsity = getSparsity(nCon, nRes), alpha = alpha, gamma = gamma, rho = 0.99)",
"_____no_output_____"
],
[
"model = _SPARCE_model_(0)\nexp = ESN_Exp(model, hiddenReprTrain, hiddenReprTest, TestInd, ValidationInd, tol)\nresults = exp.train_sparce(nEpoch=nEpoch, lR=lR, nBatch=nBatch, quantile=quantile, lr_divide_factor=lrDiv, returnData=True, returnDataAll=False)\nresults['AccTest'][-1].mean()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e79ceb10e2cbe6212925faf5c042f78d8ca483c6 | 6,646 | ipynb | Jupyter Notebook | machine_learning/hyperparameter_tuning/hyperparam-dask.ipynb | mmccarty/saturn-cloud-examples | 60635ef9b5bf1f67bad0c2c865d0d50addcdccb9 | [
"BSD-3-Clause"
] | 17 | 2020-06-14T03:47:35.000Z | 2022-03-07T00:25:23.000Z | machine_learning/hyperparameter_tuning/hyperparam-dask.ipynb | mmccarty/saturn-cloud-examples | 60635ef9b5bf1f67bad0c2c865d0d50addcdccb9 | [
"BSD-3-Clause"
] | 12 | 2020-07-22T22:40:09.000Z | 2021-03-17T14:10:27.000Z | machine_learning/hyperparameter_tuning/hyperparam-dask.ipynb | mmccarty/saturn-cloud-examples | 60635ef9b5bf1f67bad0c2c865d0d50addcdccb9 | [
"BSD-3-Clause"
] | 8 | 2020-06-14T03:47:23.000Z | 2021-11-20T15:14:04.000Z | 24.433824 | 123 | 0.540175 | [
[
[
"# Hyperparameter tuning\n\n## Dask + scikit-learn\n\n<img src=\"https://docs.dask.org/en/latest/_images/dask_horizontal.svg\" width=\"500\">",
"_____no_output_____"
]
],
[
[
"from dask.distributed import Client\nfrom dask_saturn import SaturnCluster\n\ncluster = SaturnCluster(\n scheduler_size='2xlarge',\n worker_size='2xlarge',\n nthreads=8,\n n_workers=3,\n)\nclient = Client(cluster)\ncluster",
"[2020-12-04 17:54:56] INFO - dask-saturn | Cluster is ready\n"
]
],
[
[
"# Load data and feature engineering",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport datetime\nimport dask.dataframe as dd\n\ntaxi = dd.read_csv(\n 's3://nyc-tlc/trip data/yellow_tripdata_2019-01.csv',\n parse_dates=['tpep_pickup_datetime', 'tpep_dropoff_datetime'],\n storage_options={'anon': True},\n).sample(frac=0.1, replace=False)",
"_____no_output_____"
],
[
"taxi['pickup_weekday'] = taxi.tpep_pickup_datetime.dt.weekday\ntaxi['pickup_weekofyear'] = taxi.tpep_pickup_datetime.dt.weekofyear\ntaxi['pickup_hour'] = taxi.tpep_pickup_datetime.dt.hour\ntaxi['pickup_minute'] = taxi.tpep_pickup_datetime.dt.minute\ntaxi['pickup_year_seconds'] = (taxi.tpep_pickup_datetime - datetime.datetime(2019, 1, 1, 0, 0, 0)).dt.seconds\ntaxi['pickup_week_hour'] = (taxi.pickup_weekday * 24) + taxi.pickup_hour\ntaxi['passenger_count'] = taxi.passenger_count.astype(float).fillna(-1)\ntaxi = taxi.fillna(value={'VendorID': 'missing', 'RatecodeID': 'missing', 'store_and_fwd_flag': 'missing' })\n\ntaxi = taxi.persist()",
"_____no_output_____"
]
],
[
[
"# Run grid search",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import ElasticNet\nfrom dask_ml.compose import ColumnTransformer\nfrom dask_ml.preprocessing import StandardScaler, DummyEncoder, Categorizer\nfrom dask_ml.model_selection import GridSearchCV\n\nnumeric_feat = ['pickup_weekday', 'pickup_weekofyear', 'pickup_hour', 'pickup_minute', \n 'pickup_year_seconds', 'pickup_week_hour', 'passenger_count']\ncategorical_feat = ['VendorID', 'RatecodeID', 'store_and_fwd_flag', \n 'PULocationID', 'DOLocationID']\nfeatures = numeric_feat + categorical_feat\ny_col = 'total_amount'\n\npipeline = Pipeline(steps=[\n ('categorize', Categorizer(columns=categorical_feat)),\n ('onehot', DummyEncoder(columns=categorical_feat)),\n ('scale', ColumnTransformer(\n transformers=[('num', StandardScaler(), numeric_feat)], \n remainder='passthrough',\n )),\n ('clf', ElasticNet(normalize=False, max_iter=100)),\n])\n\nparams = {\n 'clf__l1_ratio': np.arange(0, 1.01, 0.01),\n 'clf__alpha': [0, 0.5, 1, 2],\n}\n\ngrid_search = GridSearchCV(pipeline, params, cv=3)",
"_____no_output_____"
]
],
[
[
"## 3 nodes",
"_____no_output_____"
]
],
[
[
"cluster.scale(3)\nclient.wait_for_workers(3)",
"_____no_output_____"
],
[
"%%time\n_ = grid_search.fit(taxi[features], taxi[y_col])",
"CPU times: user 5.4 s, sys: 578 ms, total: 5.98 s\nWall time: 1h 3min 54s\n"
]
],
[
[
"## Scale up to 10 nodes",
"_____no_output_____"
]
],
[
[
"cluster.scale(10)\nclient.wait_for_workers(10)",
"_____no_output_____"
],
[
"%%time\n_ = grid_search.fit(taxi[features], taxi[y_col])",
"CPU times: user 3.14 s, sys: 275 ms, total: 3.41 s\nWall time: 19min 49s\n"
]
],
[
[
"## Scale up to 20 nodes",
"_____no_output_____"
]
],
[
[
"cluster.scale(20)\nclient.wait_for_workers(20)",
"_____no_output_____"
],
[
"%%time\n_ = grid_search.fit(taxi[features], taxi[y_col])",
"CPU times: user 2.57 s, sys: 257 ms, total: 2.83 s\nWall time: 10min 48s\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79ceda7b48f62d44638921983d78c8ca14c53e7 | 175,056 | ipynb | Jupyter Notebook | docs/notebooks/deep_learning/resnet_flax.ipynb | froystig/jaxopt | d0e3949b04c06960dfbb631386eee20ebffa50a6 | [
"Apache-2.0"
] | 2 | 2021-10-04T15:20:55.000Z | 2021-10-05T08:52:46.000Z | docs/notebooks/deep_learning/resnet_flax.ipynb | froystig/jaxopt | d0e3949b04c06960dfbb631386eee20ebffa50a6 | [
"Apache-2.0"
] | null | null | null | docs/notebooks/deep_learning/resnet_flax.ipynb | froystig/jaxopt | d0e3949b04c06960dfbb631386eee20ebffa50a6 | [
"Apache-2.0"
] | null | null | null | 337.945946 | 82,408 | 0.917952 | [
[
[
"# Resnet example with Flax and JAXopt.\n\n[](https://colab.research.google.com/github/google/jaxopt/blob/main/docs/notebooks/resnet_flax.ipynb)\n\n[*Mathieu Blondel*](https://mblondel.org/), [*Fabian Pedregosa*](https://fa.bianp.net)\n\nIn this notebook, we'll go through training a deep residual network with jaxopt.",
"_____no_output_____"
]
],
[
[
"%%capture\n%pip install jaxopt flax",
"_____no_output_____"
],
[
"from datetime import datetime\nimport collections\n\nfrom functools import partial\nfrom typing import Any, Callable, Sequence, Tuple\n\nfrom flax import linen as nn\n\nimport jax\nimport jax.numpy as jnp\n\nfrom jaxopt import loss\nfrom jaxopt import OptaxSolver\nfrom jaxopt import tree_util\n\nimport optax\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\n\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"Flags = collections.namedtuple(\n \"Flags\",\n [\n \"l2reg\", # amount of L2 regularization in the objective\n \"learning_rate\", # learning rate for the Adam optimizer\n \"epochs\", # number of passes over the dataset\n \"dataset\", # one of \"mnist\", \"kmnist\", \"emnist\", \"fashion_mnist\", \"cifar10\", \"cifar100\"\n \"model\", # model architecture, one of \"resnet1\", \"resnet18\", \"resnet34\"\n \"train_batch_size\", # Batch size at train time\n \"test_batch_size\" # Batch size at test time\n ])\n\nFLAGS = Flags(\n l2reg=0.0001,\n learning_rate=0.001,\n epochs=50,\n dataset=\"cifar10\",\n model=\"resnet18\",\n train_batch_size=128,\n test_batch_size=128)",
"_____no_output_____"
],
[
"def load_dataset(split, *, is_training, batch_size):\n version = 3\n ds, ds_info = tfds.load(\n f\"{FLAGS.dataset}:{version}.*.*\",\n as_supervised=True, # remove useless keys\n split=split,\n with_info=True)\n ds = ds.cache().repeat()\n if is_training:\n ds = ds.shuffle(10 * batch_size, seed=0)\n ds = ds.batch(batch_size)\n return iter(tfds.as_numpy(ds)), ds_info\n\n\nclass ResNetBlock(nn.Module):\n \"\"\"ResNet block.\"\"\"\n filters: int\n conv: Any\n norm: Any\n act: Callable\n strides: Tuple[int, int] = (1, 1)\n\n @nn.compact\n def __call__(self, x,):\n residual = x\n y = self.conv(self.filters, (3, 3), self.strides)(x)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3))(y)\n y = self.norm(scale_init=nn.initializers.zeros)(y)\n\n if residual.shape != y.shape:\n residual = self.conv(self.filters, (1, 1),\n self.strides, name='conv_proj')(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNetV1.\"\"\"\n stage_sizes: Sequence[int]\n block_cls: Any\n num_classes: int\n num_filters: int = 64\n dtype: Any = jnp.float32\n act: Callable = nn.relu\n\n @nn.compact\n def __call__(self, x, train: bool = True):\n conv = partial(nn.Conv, use_bias=False, dtype=self.dtype)\n norm = partial(nn.BatchNorm,\n # use_running_average=True,\n use_running_average=not train,\n momentum=0.99,\n epsilon=0.001,\n dtype=self.dtype)\n\n x = conv(self.num_filters, (7, 7), (2, 2),\n padding=[(3, 3), (3, 3)],\n name='conv_init')(x)\n x = norm(name='bn_init')(x)\n x = nn.relu(x)\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')\n for i, block_size in enumerate(self.stage_sizes):\n for j in range(block_size):\n strides = (2, 2) if i > 0 and j == 0 else (1, 1)\n x = self.block_cls(self.num_filters * 2 ** i,\n strides=strides,\n conv=conv,\n norm=norm,\n act=self.act)(x)\n x = jnp.mean(x, axis=(1, 2))\n x = nn.Dense(self.num_classes, dtype=self.dtype)(x)\n x = jnp.asarray(x, 
self.dtype)\n return x\n\n\nResNet1 = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock)\nResNet18 = partial(ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock)\nResNet34 = partial(ResNet, stage_sizes=[3, 4, 6, 3], block_cls=ResNetBlock)",
"_____no_output_____"
]
],
[
[
"We'll now load our train and test dataset and plot a few of the training images.",
"_____no_output_____"
]
],
[
[
"# Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make\n# it unavailable to JAX.\ntf.config.experimental.set_visible_devices([], 'GPU')\n\ntrain_ds, ds_info = load_dataset(\"train\", is_training=True,\n batch_size=FLAGS.train_batch_size)\ntest_ds, _ = load_dataset(\"test\", is_training=False,\n batch_size=FLAGS.test_batch_size)\ninput_shape = (1,) + ds_info.features[\"image\"].shape\nnum_classes = ds_info.features[\"label\"].num_classes\niter_per_epoch_train = ds_info.splits['train'].num_examples // FLAGS.train_batch_size\niter_per_epoch_test = ds_info.splits['test'].num_examples // FLAGS.test_batch_size\n\n\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']\nmb_images, mb_labels = next(train_ds)\n_, axes = plt.subplots(nrows=4, ncols=4, figsize=(6, 6))\nfor i in range(4):\n for j in range(4):\n k = i * 4 + j\n axes[i, j].imshow(mb_images[k], cmap=plt.cm.gray_r, interpolation=\"nearest\")\n axes[i, j].set_axis_off()\n axes[i, j].set_title(class_names[mb_labels[k]])",
"_____no_output_____"
],
[
"# Set up model.\nif FLAGS.model == \"resnet1\":\n net = ResNet1(num_classes=num_classes)\nelif FLAGS.model == \"resnet18\":\n net = ResNet18(num_classes=num_classes)\nelif FLAGS.model == \"resnet34\":\n net = ResNet34(num_classes=num_classes)\nelse:\n raise ValueError(\"Unknown model.\")\n\n\ndef predict(params, inputs, batch_stats, train=False):\n x = inputs.astype(jnp.float32) / 255.\n all_params = {\"params\": params, \"batch_stats\": batch_stats}\n if train:\n # Returns logits and net_state (which contains the key \"batch_stats\").\n return net.apply(all_params, x, train=train, mutable=[\"batch_stats\"])\n else:\n # Returns logits only.\n return net.apply(all_params, x, train=train, mutable=False)\n\nlogistic_loss = jax.vmap(loss.multiclass_logistic_loss)\n\n\ndef loss_from_logits(params, l2reg, logits, labels):\n mean_loss = jnp.mean(logistic_loss(labels, logits))\n sqnorm = tree_util.tree_l2_norm(params, squared=True)\n return mean_loss + 0.5 * l2reg * sqnorm\n\n\[email protected]\ndef accuracy_and_loss(params, l2reg, data, aux):\n inputs, labels = data\n logits = predict(params, inputs, aux, train=False)\n accuracy = jnp.mean(jnp.argmax(logits, axis=-1) == labels)\n loss = loss_from_logits(params, l2reg, logits, labels)\n return accuracy, loss\n\n\ndef loss_fun(params, l2reg, data, aux):\n inputs, labels = data\n logits, net_state = predict(params, inputs, aux, train=True)\n loss = loss_from_logits(params, l2reg, logits, labels)\n # batch_stats will be stored in state.aux\n return loss, net_state[\"batch_stats\"]",
"_____no_output_____"
],
[
"# Initialize solver.\nopt = optax.adam(learning_rate=FLAGS.learning_rate)\n\n# We need has_aux=True because loss_fun returns batch_stats.\nsolver = OptaxSolver(opt=opt, fun=loss_fun,\n maxiter=FLAGS.epochs * iter_per_epoch_train, has_aux=True)\n\n# Initialize parameters.\nrng = jax.random.PRNGKey(0)\ninit_vars = net.init({\"params\": rng}, jnp.ones(input_shape, net.dtype))\nparams = init_vars[\"params\"]\nbatch_stats = init_vars[\"batch_stats\"]\nstart = datetime.now().replace(microsecond=0)\n\n# Run training loop.\nstate = solver.init_state(params)\njitted_update = jax.jit(solver.update)\n\nall_test_error = []\nall_train_loss = []\nfor it in range(solver.maxiter):\n train_minibatch = next(train_ds)\n\n if state.iter_num % iter_per_epoch_train == iter_per_epoch_train - 1:\n # Once per epoch evaluate the model on the train and test sets.\n test_acc, test_loss = 0., 0.\n # make a pass over test set to compute test accuracy\n for _ in range(iter_per_epoch_test):\n tmp = accuracy_and_loss(params, FLAGS.l2reg, next(test_ds), batch_stats)\n test_acc += tmp[0] / iter_per_epoch_test\n test_loss += tmp[1] / iter_per_epoch_test\n\n train_acc, train_loss = 0., 0.\n # make a pass over train set to compute train accuracy\n for _ in range(iter_per_epoch_train):\n tmp = accuracy_and_loss(params, FLAGS.l2reg, next(train_ds), batch_stats)\n train_acc += tmp[0] / iter_per_epoch_train\n train_loss += tmp[1] / iter_per_epoch_train\n\n train_acc = jax.device_get(train_acc)\n train_loss = jax.device_get(train_loss)\n test_acc = jax.device_get(test_acc)\n test_loss = jax.device_get(test_loss)\n all_test_error.append(1 - test_acc)\n all_train_loss.append(train_loss)\n # time elapsed without microseconds\n time_elapsed = (datetime.now().replace(microsecond=0) - start)\n\n print(f\"[Epoch {state.iter_num // (iter_per_epoch_train+1) + 1}/{FLAGS.epochs}] \"\n f\"Train acc: {train_acc:.3f}, train loss: {train_loss:.3f}. \"\n f\"Test acc: {test_acc:.3f}, test loss: {test_loss:.3f}. 
\"\n f\"Time elapsed: {time_elapsed}\")\n\n\n params, state = jitted_update(params=params,\n state=state,\n l2reg=FLAGS.l2reg,\n data=train_minibatch,\n aux=batch_stats)\n batch_stats = state.aux",
"[Epoch 1/50] Train acc: 0.546, train loss: 1.496. Test acc: 0.527, test loss: 1.553. Time elapsed: 0:00:28\n[Epoch 2/50] Train acc: 0.591, train loss: 1.377. Test acc: 0.553, test loss: 1.506. Time elapsed: 0:00:32\n[Epoch 3/50] Train acc: 0.559, train loss: 1.515. Test acc: 0.523, test loss: 1.692. Time elapsed: 0:00:36\n[Epoch 4/50] Train acc: 0.609, train loss: 1.472. Test acc: 0.556, test loss: 1.722. Time elapsed: 0:00:40\n[Epoch 5/50] Train acc: 0.694, train loss: 1.153. Test acc: 0.607, test loss: 1.522. Time elapsed: 0:00:43\n[Epoch 6/50] Train acc: 0.801, train loss: 0.795. Test acc: 0.675, test loss: 1.274. Time elapsed: 0:00:47\n[Epoch 7/50] Train acc: 0.807, train loss: 0.802. Test acc: 0.665, test loss: 1.385. Time elapsed: 0:00:51\n[Epoch 8/50] Train acc: 0.736, train loss: 1.044. Test acc: 0.612, test loss: 1.688. Time elapsed: 0:00:54\n[Epoch 9/50] Train acc: 0.830, train loss: 0.767. Test acc: 0.662, test loss: 1.560. Time elapsed: 0:00:58\n[Epoch 10/50] Train acc: 0.686, train loss: 1.375. Test acc: 0.562, test loss: 2.260. Time elapsed: 0:01:02\n[Epoch 11/50] Train acc: 0.863, train loss: 0.679. Test acc: 0.668, test loss: 1.628. Time elapsed: 0:01:05\n[Epoch 12/50] Train acc: 0.837, train loss: 0.806. Test acc: 0.656, test loss: 1.876. Time elapsed: 0:01:09\n[Epoch 13/50] Train acc: 0.809, train loss: 0.926. Test acc: 0.642, test loss: 1.988. Time elapsed: 0:01:13\n[Epoch 14/50] Train acc: 0.778, train loss: 1.094. Test acc: 0.607, test loss: 2.307. Time elapsed: 0:01:16\n[Epoch 15/50] Train acc: 0.882, train loss: 0.643. Test acc: 0.674, test loss: 1.807. Time elapsed: 0:01:20\n[Epoch 16/50] Train acc: 0.818, train loss: 0.908. Test acc: 0.638, test loss: 2.143. Time elapsed: 0:01:24\n[Epoch 17/50] Train acc: 0.848, train loss: 0.796. Test acc: 0.664, test loss: 1.951. Time elapsed: 0:01:27\n[Epoch 18/50] Train acc: 0.914, train loss: 0.540. Test acc: 0.695, test loss: 1.789. 
Time elapsed: 0:01:31\n[Epoch 19/50] Train acc: 0.893, train loss: 0.628. Test acc: 0.681, test loss: 1.884. Time elapsed: 0:01:35\n[Epoch 20/50] Train acc: 0.889, train loss: 0.635. Test acc: 0.676, test loss: 1.877. Time elapsed: 0:01:38\n[Epoch 21/50] Train acc: 0.648, train loss: 2.020. Test acc: 0.517, test loss: 3.239. Time elapsed: 0:01:42\n[Epoch 22/50] Train acc: 0.930, train loss: 0.484. Test acc: 0.697, test loss: 1.713. Time elapsed: 0:01:46\n[Epoch 23/50] Train acc: 0.911, train loss: 0.547. Test acc: 0.685, test loss: 1.844. Time elapsed: 0:01:50\n[Epoch 24/50] Train acc: 0.922, train loss: 0.512. Test acc: 0.701, test loss: 1.778. Time elapsed: 0:01:53\n[Epoch 25/50] Train acc: 0.892, train loss: 0.631. Test acc: 0.672, test loss: 2.050. Time elapsed: 0:01:57\n[Epoch 26/50] Train acc: 0.910, train loss: 0.551. Test acc: 0.681, test loss: 1.849. Time elapsed: 0:02:01\n[Epoch 27/50] Train acc: 0.874, train loss: 0.706. Test acc: 0.663, test loss: 2.015. Time elapsed: 0:02:04\n[Epoch 28/50] Train acc: 0.936, train loss: 0.471. Test acc: 0.715, test loss: 1.746. Time elapsed: 0:02:08\n[Epoch 29/50] Train acc: 0.910, train loss: 0.545. Test acc: 0.688, test loss: 1.774. Time elapsed: 0:02:12\n[Epoch 30/50] Train acc: 0.922, train loss: 0.510. Test acc: 0.694, test loss: 1.815. Time elapsed: 0:02:15\n[Epoch 31/50] Train acc: 0.923, train loss: 0.497. Test acc: 0.691, test loss: 1.727. Time elapsed: 0:02:19\n[Epoch 32/50] Train acc: 0.905, train loss: 0.562. Test acc: 0.679, test loss: 1.882. Time elapsed: 0:02:23\n[Epoch 33/50] Train acc: 0.918, train loss: 0.521. Test acc: 0.674, test loss: 1.917. Time elapsed: 0:02:26\n[Epoch 34/50] Train acc: 0.951, train loss: 0.406. Test acc: 0.716, test loss: 1.670. Time elapsed: 0:02:30\n[Epoch 35/50] Train acc: 0.878, train loss: 0.687. Test acc: 0.653, test loss: 2.112. Time elapsed: 0:02:34\n[Epoch 36/50] Train acc: 0.951, train loss: 0.406. Test acc: 0.715, test loss: 1.727. 
Time elapsed: 0:02:37\n[Epoch 37/50] Train acc: 0.935, train loss: 0.455. Test acc: 0.711, test loss: 1.719. Time elapsed: 0:02:41\n[Epoch 38/50] Train acc: 0.940, train loss: 0.439. Test acc: 0.706, test loss: 1.715. Time elapsed: 0:02:45\n[Epoch 39/50] Train acc: 0.868, train loss: 0.731. Test acc: 0.655, test loss: 2.109. Time elapsed: 0:02:48\n[Epoch 40/50] Train acc: 0.952, train loss: 0.400. Test acc: 0.711, test loss: 1.708. Time elapsed: 0:02:52\n[Epoch 41/50] Train acc: 0.949, train loss: 0.406. Test acc: 0.708, test loss: 1.702. Time elapsed: 0:02:56\n[Epoch 42/50] Train acc: 0.960, train loss: 0.371. Test acc: 0.718, test loss: 1.654. Time elapsed: 0:02:59\n[Epoch 43/50] Train acc: 0.873, train loss: 0.691. Test acc: 0.649, test loss: 2.030. Time elapsed: 0:03:03\n[Epoch 44/50] Train acc: 0.942, train loss: 0.423. Test acc: 0.700, test loss: 1.730. Time elapsed: 0:03:07\n[Epoch 45/50] Train acc: 0.918, train loss: 0.518. Test acc: 0.682, test loss: 1.846. Time elapsed: 0:03:10\n[Epoch 46/50] Train acc: 0.948, train loss: 0.407. Test acc: 0.705, test loss: 1.731. Time elapsed: 0:03:14\n[Epoch 47/50] Train acc: 0.947, train loss: 0.404. Test acc: 0.698, test loss: 1.761. Time elapsed: 0:03:18\n[Epoch 48/50] Train acc: 0.908, train loss: 0.543. Test acc: 0.679, test loss: 1.927. Time elapsed: 0:03:21\n[Epoch 49/50] Train acc: 0.942, train loss: 0.418. Test acc: 0.705, test loss: 1.818. Time elapsed: 0:03:25\n[Epoch 50/50] Train acc: 0.945, train loss: 0.407. Test acc: 0.705, test loss: 1.772. Time elapsed: 0:03:29\n"
],
[
"fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))\n\nax1.plot(all_test_error, lw=3)\nax1.set_ylabel('Error on test set', fontsize=20)\nax1.grid()\nax1.set_xlabel('Epochs', fontsize=20)\n\nax2.plot(all_train_loss, lw=3)\nax2.set_ylabel('Loss on train set', fontsize=20)\nax2.grid()\nax2.set_xlabel('Epochs', fontsize=20)\n\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79cfa22f76e94e4157be591a9396d5a11d6eaf2 | 6,143 | ipynb | Jupyter Notebook | python/optimizer/Untitled.ipynb | jamOne-/kiwi-zero | 6c4c1dea21f0acc2ed0ffff4b7b73b11bfa6cf5c | [
"MIT"
] | null | null | null | python/optimizer/Untitled.ipynb | jamOne-/kiwi-zero | 6c4c1dea21f0acc2ed0ffff4b7b73b11bfa6cf5c | [
"MIT"
] | null | null | null | python/optimizer/Untitled.ipynb | jamOne-/kiwi-zero | 6c4c1dea21f0acc2ed0ffff4b7b73b11bfa6cf5c | [
"MIT"
] | null | null | null | 28.705607 | 121 | 0.366596 | [
[
[
"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras import layers",
"_____no_output_____"
],
[
"def get_model(input_shape):\n model = tf.keras.Sequential()\n model.add(layers.Dense(1, activation=\"sigmoid\", use_bias=False, input_shape=input_shape, name=\"output\"))\n\n return model",
"_____no_output_____"
],
[
"model = get_model((128,))\nprint(model.count_params())\nprint(model.get_layer('output').get_weights())\n\n# while True:\n# weights = np.array(map(float, input().split(\" \")))\n# Xs_length = int(input())\n# Xs = [list(map(float, input().split(\" \"))) for i in range(Xs_length)]\n# ys = [list(map(float, input().split(\" \"))) for i in range(Xs_length)]\n\n# Xs = np.array(Xs)\n# ys = np.array(ys)",
"128\n[array([[-0.070042 ],\n [-0.12967902],\n [ 0.1784502 ],\n [ 0.1888119 ],\n [ 0.05343178],\n [ 0.19890565],\n [ 0.06547093],\n [ 0.12398905],\n [ 0.15475494],\n [ 0.17756364],\n [-0.14568566],\n [-0.06737414],\n [-0.04501684],\n [-0.13787413],\n [ 0.11470982],\n [-0.13841954],\n [-0.11169987],\n [ 0.18821663],\n [-0.08993939],\n [-0.16105573],\n [ 0.03888461],\n [-0.10295212],\n [ 0.17222464],\n [-0.06266831],\n [ 0.19148341],\n [-0.15825686],\n [ 0.17006227],\n [-0.17409597],\n [ 0.12592727],\n [-0.03068574],\n [-0.18968461],\n [ 0.03431857],\n [-0.03417501],\n [-0.12726405],\n [ 0.17843941],\n [ 0.08949974],\n [-0.10209975],\n [ 0.00043269],\n [ 0.07668221],\n [ 0.00680798],\n [ 0.15241116],\n [-0.07076454],\n [-0.1279983 ],\n [-0.09537274],\n [ 0.17841327],\n [ 0.17512372],\n [-0.09652679],\n [-0.10250092],\n [-0.2136502 ],\n [-0.08312435],\n [ 0.1023733 ],\n [ 0.03332238],\n [ 0.20347372],\n [-0.04064126],\n [ 0.13332117],\n [ 0.03582725],\n [ 0.17402193],\n [ 0.01574142],\n [ 0.17052588],\n [ 0.08714333],\n [-0.05233222],\n [-0.05263931],\n [ 0.0695495 ],\n [-0.07503645],\n [ 0.11541981],\n [ 0.15784734],\n [ 0.11444181],\n [ 0.12553653],\n [ 0.06042528],\n [ 0.21122637],\n [ 0.14210254],\n [-0.08238253],\n [-0.00500463],\n [-0.07139766],\n [ 0.19640747],\n [-0.12118605],\n [ 0.05339515],\n [-0.20029934],\n [ 0.03070615],\n [ 0.03541306],\n [ 0.12312922],\n [-0.05295265],\n [-0.09500886],\n [ 0.1716364 ],\n [ 0.01238753],\n [ 0.02762546],\n [ 0.01335333],\n [ 0.07084933],\n [ 0.15185982],\n [-0.13430697],\n [ 0.16014299],\n [ 0.10649008],\n [-0.06621373],\n [ 0.16061985],\n [-0.09452099],\n [-0.09701084],\n [ 0.12809864],\n [-0.19478916],\n [-0.02424586],\n [ 0.0449895 ],\n [-0.00197576],\n [-0.17941199],\n [-0.00968677],\n [ 0.14456001],\n [-0.11646473],\n [ 0.06357512],\n [ 0.1175434 ],\n [-0.01605225],\n [ 0.09839576],\n [-0.07668756],\n [ 0.02374648],\n [ 0.0183394 ],\n [-0.18995698],\n [-0.2053163 ],\n [-0.10305999],\n [-0.14742607],\n 
[-0.03274444],\n [-0.07992148],\n [-0.19393201],\n [ 0.15288195],\n [ 0.02912447],\n [-0.04568267],\n [-0.0249978 ],\n [-0.21113916],\n [ 0.12496763],\n [-0.03422838],\n [-0.1404042 ],\n [ 0.18714201]], dtype=float32)]\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e79d00bbe3c8def0d3d47927435c7d6b1ae897ee | 4,273 | ipynb | Jupyter Notebook | EX1_model2+3.ipynb | sonomarina/Mathmodelling_DTU | 1647cc0b4cdc7f46e9e38811675d87bb96c171b8 | [
"Apache-2.0"
] | null | null | null | EX1_model2+3.ipynb | sonomarina/Mathmodelling_DTU | 1647cc0b4cdc7f46e9e38811675d87bb96c171b8 | [
"Apache-2.0"
] | null | null | null | EX1_model2+3.ipynb | sonomarina/Mathmodelling_DTU | 1647cc0b4cdc7f46e9e38811675d87bb96c171b8 | [
"Apache-2.0"
] | null | null | null | 36.211864 | 239 | 0.502457 | [
[
[
"<a href=\"https://colab.research.google.com/github/sonomarina/Mathmodelling_DTU/blob/main/EX1_model2%2B3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"# DATA FILES DIRECTORY\ndirIn = 'path to Data folder for exercise 1'\n# IMPORTS\nimport helpFunctions as hf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport imageio\n# DAY ONE MULTISPECTRAL IMAGE AND ANNOTATIONS\nmultiIm, annotationIm = hf.loadMulti('multispectral_day01.mat','annotation_day01.png',\\\ndirIn)\n# ASSUME EQUAL PROBABILITY. CHANGE THIS TO ACCOMMODATE METHOD 3 PRIOR PROBABILITIES\np = 0.5\n# EXTRACT PIXELS FOR FAT AND MEAT\n[fatPix, fatR, fatC] = hf.getPix(multiIm, annotationIm[:,:,1]);\n[meatPix, meatR, meatC] = hf.getPix(multiIm, annotationIm[:,:,2]);\n# MEANS FOR THE BANDS\nmeatM = np.mean(meatPix,0)\nfatM = np.mean(fatPix, 0)\ncovM = np.zeros((19,19))\ncovF = np.zeros((19,19))\n# ARRAY OF PIXELS FOR EACH BAND\nm = 264196\nreshIm = np.zeros((m, 19))\nfor i in range(0, 19):\nreshIm[:,i] = multiIm[:,:,i].flatten()\n# COMPUTE COVARIANCE\nfor i in range(0, 19):\nfor j in range(0, 19):\ncovM[i, j] = 1/(m - 1)*np.sum((reshIm[:,i] - meatM[i])*(reshIm[:,j] - meatM[j]))\ncovF[i, j] = 1/(m - 1)*np.sum((reshIm[:,i] - fatM[i])*(reshIm[:,j] - fatM[j]))\ncov = 1/(2*m - 2)*((m-1)*covM + (m-1)*covF)",
"_____no_output_____"
],
[
"# LDA FUNCTION\n\ndef discFunc(x):\n s_meat = np.dot(x,np.dot(np.linalg.inv(cov),meatM)) - \\\n1/2*np.dot(meatM, np.dot(np.linalg.inv(cov),meatM)) + np.log(p)\n s_fat = np.dot(x,np.dot(np.linalg.inv(cov),fatM)) - \\\n1/2*np.dot(fatM, np.dot(np.linalg.inv(cov),fatM)) + np.log(p)\n return s_meat, s_fat\n# CLASSIFY THE PIXELS\ns = np.zeros(264196)\nfor i in range(0, 264196):\n s_meat, s_fat = discFunc(reshIm[i,:])\n if (s_fat >= s_meat):\n s[i] = 1\n else:\n s[i] = 0\ns = np.reshape(s, (514, 514)).astype(np.int64)\n# IDENTIFIED CLASSIFICATIONS VS KNOWN CLASSIFICATIONS\nc = s[fatR, fatC] == annotationIm[fatR, fatC, 1]\nrate = len(c[c == True])/len(c)\n# IDENTIFY BACKGROUND\nbPix = np.where(np.logical_and(annotationIm[:,:,0] == 0, annotationIm[:,:,1] == 0, \\\nannotationIm[:,:,2] == 0))\n# REMOVE BACKGROUND\ns[bPix] = 0\n# CLASSIFIED IMAGE PLOT\n[fatPix, fatR, fatC] = hf.getPix(multiIm, s)\npixId = np.stack((fatR, fatC), axis=1)\nimRGB = imageio.imread(dirIn + 'color_day01.png')\nrgbOut = hf.setImagePix(imRGB, pixId)\nplt.imshow(rgbOut)\nplt.title('Classified image of the salami on day 1')\nplt.xlim(0,514)\nplt.ylim(514,0)\nplt.savefig('salami_day1_model_2', dpi = 300)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e79d01e067cf26c907781569bd4486036dfdb71f | 290,861 | ipynb | Jupyter Notebook | notebooks/StrongLensDemo.ipynb | bombrun/GaiaLQSO | b4d787a4d80732cbb5a3762c34298f2430dd0540 | [
"CC-BY-4.0"
] | 1 | 2019-04-11T12:48:32.000Z | 2019-04-11T12:48:32.000Z | notebooks/StrongLensDemo.ipynb | bombrun/GaiaLQSO | b4d787a4d80732cbb5a3762c34298f2430dd0540 | [
"CC-BY-4.0"
] | 2 | 2018-05-08T10:09:26.000Z | 2018-05-25T07:55:51.000Z | notebooks/StrongLensDemo.ipynb | bombrun/GaiaLQSO | b4d787a4d80732cbb5a3762c34298f2430dd0540 | [
"CC-BY-4.0"
] | null | null | null | 450.947287 | 43,474 | 0.935602 | [
[
[
"Jupyter notebook transcription of \nhttp://www.physics.utah.edu/~bolton/python_lens_demo/",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"original python files",
"_____no_output_____"
]
],
[
[
"ls *.py",
"lensdemo_funcs.py lensdemo_script.py\r\n"
]
],
[
[
"Import the necessary packages",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib as mp\nfrom matplotlib import pyplot as plt\nimport lensdemo_funcs as ldf",
"_____no_output_____"
],
[
"mp.rcParams['figure.figsize'] = (12, 8)",
"_____no_output_____"
]
],
[
[
"Package some image display preferences in a dictionary object, for use below:",
"_____no_output_____"
]
],
[
[
"myargs = {'interpolation': 'nearest', 'origin': 'lower', 'cmap': mp.cm.gnuplot}",
"_____no_output_____"
]
],
[
[
"# Test",
"_____no_output_____"
],
[
"Make some x and y coordinate images:",
"_____no_output_____"
]
],
[
[
"nx = 501\nny = 501\nxhilo = [-2.5, 2.5]\nyhilo = [-2.5, 2.5]\nx = (xhilo[1] - xhilo[0]) * np.outer(np.ones(ny), np.arange(nx)) / float(nx-1) + xhilo[0]\ny = (yhilo[1] - yhilo[0]) * np.outer(np.arange(ny), np.ones(nx)) / float(ny-1) + yhilo[0]",
"_____no_output_____"
],
[
"# The following lines can be used to verify that the SIE potential gradient\n# function actually computes what is is supposed to compute!\n# Feel free to disregard...\n\n# Pick some arbitrary lens parameters:\nlpar = np.asarray([1.11, -0.23, 0.59, 0.72, 33.3])\n# Compute the gradients:\n(xg, yg) = ldf.sie_grad(x, y, lpar)\n# Compute convergence as half the Laplacian of the potential from the gradients:\nkappa_g = 0.5 * ( (xg[1:-1,2:] - xg[1:-1,0:-2]) / (x[1:-1,2:] - x[1:-1,0:-2]) +\n (yg[2:,1:-1] - yg[0:-2,1:-1]) / (y[2:,1:-1] - y[0:-2,1:-1]))\n# Compute the expected analytic convergence for these lens parameters:\n(xn, yn) = ldf.xy_rotate(x, y, lpar[1], lpar[2], lpar[4])\nkappa_a = 0.5 * lpar[0] / np.sqrt(lpar[3]*xn[1:-1,1:-1]**2 + yn[1:-1,1:-1]**2 / lpar[3])\n\nf = plt.imshow(np.hstack((np.log(kappa_g), np.log(kappa_a), np.log(kappa_g) - np.log(kappa_a))),\n vmax=np.log(kappa_g).max(), vmin=np.log(kappa_g).min(), **myargs)\n# OK, looks good! Some disagreement in the center, which is to be expected.",
"_____no_output_____"
]
],
[
[
"# First lens",
"_____no_output_____"
],
[
"Set some Gaussian blob image parameters and pack them into an array",
"_____no_output_____"
]
],
[
[
"g_amp = 1.0 # peak brightness value\ng_sig = 0.05 # Gaussian \"sigma\" (i.e., size)\ng_xcen = 0.0 # x position of center\ng_ycen = 0.0 # y position of center\ng_axrat = 1.0 # minor-to-major axis ratio\ng_pa = 0.0 # major-axis position angle (degrees) c.c.w. from x axis\ngpar = np.array([g_amp, g_sig, g_xcen, g_ycen, g_axrat, g_pa])",
"_____no_output_____"
]
],
[
[
"The un-lensed Gaussian image:",
"_____no_output_____"
],
[
"Set some SIE lens-model parameters and pack them into an array:",
"_____no_output_____"
]
],
[
[
"l_amp = 1. # Einstein radius\nl_xcen = 0.0 # x position of center\nl_ycen = 0.0 # y position of center\nl_axrat = 1.0 # minor-to-major axis ratio\nl_pa = 0.0 # major-axis position angle (degrees) c.c.w. from x axis\nlpar = np.asarray([l_amp, l_xcen, l_ycen, l_axrat, l_pa])",
"_____no_output_____"
]
],
[
[
"The following lines will plot the un-lensed and lensed images side by side:",
"_____no_output_____"
]
],
[
[
"g_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
]
],
[
[
"# Playing around",
"_____no_output_____"
],
[
"## Ilustration of proper motion magnification",
"_____no_output_____"
]
],
[
[
"gpar = np.asarray([100.0, 0.01, -0.2, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 1.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.1, 0.1, 1.0, 0.0])\nlpar = np.asarray([1., 0.0, 0.0, 1.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.0, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 1.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
]
],
[
[
"## Ilustration of proper motion magnification",
"_____no_output_____"
]
],
[
[
"gpar = np.asarray([1.0, 0.05, -0.4, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 2.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.3, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 2.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.2, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 2.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.1, 0.1, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 2.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, 0.1, 0.0, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 2, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
]
],
[
[
"# Ilustration of missing counter part detection",
"_____no_output_____"
]
],
[
[
"gpar = np.asarray([1.0, 0.05, -0.8 , 0, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 1.0, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
]
],
[
[
"# Illustration of lens elliptisity",
"_____no_output_____"
]
],
[
[
"gpar = np.asarray([1.0, 0.05, -1 , 0, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 0.1, 0.0])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
],
[
"gpar = np.asarray([1.0, 0.05, -0.5 , 0, 1.0, 0.0])\nlpar = np.asarray([1.0, 0.0, 0.0, 0.1, 45])\ng_image = ldf.gauss_2d(x, y, gpar)\n(xg, yg) = ldf.sie_grad(x, y, lpar)\ng_lensimage = ldf.gauss_2d(x-xg, y-yg, gpar)\nf = plt.imshow(np.hstack((g_image, g_lensimage)), **myargs)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79d134730683e4f937a44d24ab34d8b9fb2364e | 919,324 | ipynb | Jupyter Notebook | tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb | luisarai/NMA2021 | d6cd66bf32d929f3030d0d66c2c92de55bd2d886 | [
"MIT"
] | null | null | null | tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb | luisarai/NMA2021 | d6cd66bf32d929f3030d0d66c2c92de55bd2d886 | [
"MIT"
] | null | null | null | tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb | luisarai/NMA2021 | d6cd66bf32d929f3030d0d66c2c92de55bd2d886 | [
"MIT"
] | null | null | null | 174.147376 | 173,650 | 0.861604 | [
[
[
"<a href=\"https://colab.research.google.com/github/luisarai/NMA2021/blob/main/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Tutorial 2: Optimal Control for Continuous State\n**Week 3, Day 3: Optimal Control**\n\n**By Neuromatch Academy**\n\n__Content creators:__ Zhengwei Wu, Shreya Saxena, Xaq Pitkow\n\n__Content reviewers:__ Karolina Stosio, Roozbeh Farhoodi, Saeed Salehi, Ella Batty, Spiros Chavlis, Matt Krause and Michael Waskom\n",
"_____no_output_____"
],
[
"**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**\n\n<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>",
"_____no_output_____"
],
[
"---\n# Tutorial Objectives\nIn this tutorial, we will implement a continuous control task: you will design control inputs for a linear dynamical system to reach a target state. The state here is continuous-valued, i.e. takes on any real number from $-\\infty$ to $\\infty$.\n\nYou have already learned about control for binary states in Tutorial 1, and you have learned about stochastic dynamics, latent states, and measurements yesterday. Now we introduce you to the new concepts of designing a controller with full observation of the state (linear qudratic regulator - LQR), and under partial observability of the state (linear quadratic gaussian - LQG).\n\nThe running example we consider throughout the tutorial is a cat trying to catch a mouse in space, using its handy little jet pack to navigate. ",
"_____no_output_____"
]
],
[
[
"# @title Tutorial slides\n\n# @markdown These are the slides for all videos in this tutorial.\nfrom IPython.display import IFrame\nIFrame(src=f\"https://mfr.ca-1.osf.io/render?url=https://osf.io/8j5rs/?direct%26mode=render%26action=download%26mode=render\", width=854, height=480)",
"_____no_output_____"
]
],
[
[
"---\n# Setup",
"_____no_output_____"
]
],
[
[
"# Imports\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom math import isclose",
"_____no_output_____"
],
[
"#@title Figure Settings\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nimport ipywidgets as widgets\nfrom ipywidgets import interact, fixed, HBox, Layout, VBox, interactive, Label\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")",
"_____no_output_____"
],
[
"# @title Plotting Functions\n\ndef plot_vs_time(s, slabel, color, goal=None, ylabel=None):\n plt.plot(s, color, label=slabel)\n if goal is not None:\n plt.plot(goal, 'm', label='goal $g$')\n plt.xlabel(\"Time\", fontsize=14)\n plt.legend(loc=\"upper right\")\n\n if ylabel:\n plt.ylabel(ylabel, fontsize=14)",
"_____no_output_____"
],
[
"# @title Helper Functions\n\nclass ExerciseError(AssertionError):\n pass\n\n\ndef test_lds_class(lds_class):\n from math import isclose\n ldsys = lds_class(T=2, ini_state=2., noise_var=0.)\n if not isclose(ldsys.dynamics(.9)[1], 1.8):\n raise ExerciseError(\"'dynamics' method is not correctly implemented!\")\n if not isclose(ldsys.dynamics_openloop(.9, 2., np.zeros(ldsys.T)-1.)[1], -0.2):\n raise ExerciseError(\"'dynamics_openloop' method is not correctly implemented!\")\n if not isclose(ldsys.dynamics_closedloop(.9, 2., np.zeros(ldsys.T)+.3)[0][1], 3.):\n raise ExerciseError(\"s[t] in 'dynamics_closedloop' method is not correctly implemented!\")\n if not isclose(ldsys.dynamics_closedloop(.9, 2., np.zeros(ldsys.T)+.3)[1][0], .6):\n raise ExerciseError(\"a[t] in 'dynamics_closedloop' method is not correctly implemented!\")\n ldsys.noise_var = 1.\n if isclose(ldsys.dynamics(.9)[1], 1.8):\n raise ExerciseError(\"Did you forget to add noise to your s[t+1] in 'dynamics'?\")\n if isclose(ldsys.dynamics_openloop(.9, 2., np.zeros(ldsys.T)-1.)[1], -0.2):\n raise ExerciseError(\"Did you forget to add noise to your s[t+1] in 'dynamics_openloop'?\")\n if isclose(ldsys.dynamics_closedloop(.9, 2., np.zeros(ldsys.T)+.3)[0][1], 3.):\n raise ExerciseError(\"Did you forget to add noise to your s[t+1] in 'dynamics_closedloop'?\")\n if not isclose(ldsys.dynamics_closedloop(.9, 2., np.zeros(ldsys.T)+.3)[1][0], .6):\n raise ExerciseError(\"Your input a[t] should not be noisy in 'dynamics_closedloop'.\")\n\n print('Well Done!')\n\n\ndef test_lqr_class(lqr_class):\n from math import isclose\n lqreg = lqr_class(T=2, ini_state=2., noise_var=0.)\n lqreg.goal = np.array([-2, -2])\n s = np.array([1, 2])\n a = np.array([3, 4])\n if not isclose(lqreg.calculate_J_state(s), 25):\n raise ExerciseError(\"'calculate_J_state' method is not correctly implemented!\")\n if not isclose(lqreg.calculate_J_control(a), 25):\n raise ExerciseError(\"'calculate_J_control' method is not correctly 
implemented!\")\n\n print('Well Done!')",
"_____no_output_____"
]
],
[
[
"---\n# Section 1: Exploring a Linear Dynamical System (LDS) with Open-Loop and Closed-Loop Control",
"_____no_output_____"
]
],
[
[
"# @title Video 1: Flying Through Space\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1Zv411B7WV\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"MLUTR8z16jI\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"\nIn this example, a cat is trying to catch a mouse in space. The location of the mouse is the goal state $g$, here a static goal. Later on, we will make the goal time varying, i.e. $g(t)$. The cat's location is the state of the system $s_t$. The state has its internal dynamics: think of the cat drifting slowly in space. These dynamics are such that the state at the next time step $s_{t+1}$ are a linear function of the current state $s_t$. There is some environmental noise (think: meteorites) affecting the state, here modeled as gaussian noise $w_t$.\n\nThe control input or action $a_t$ is the action of the jet pack, which has an effect $Ba_t$ on the state at the next time step $s_{t+1}$. In this tutorial, we will be designing the action $a_t$ to reach the goal $g$, with known state dynamics.\n\nThus, our linear discrete-time system evolves according to the following equation:\n\n\\begin{eqnarray*}\ns_{t+1} &=& Ds_t + Ba_t + w_t \\tag{1}\\\\\ns_{0} &=& s_{init}\n\\end{eqnarray*}\n\nwith \n\n$t$: time step, ranging from $1$ to $T$, where $T$ is the time horizon.\n\n$s_t$: state at time $t$ \n\n$a_t$: action at time $t$ (also known as control input)\n\n$w_t$: gaussian noise at time $t$\n\n$D$ and $B$: parameters of the linear dynamical system. \n\nFor simplicity, we will consider the 1D case, where the matrices reduce to scalars, and the states, control and noise are one-dimensional as well. Specifically, $D$ and $B$ are scalars.\n\nWe will consider the goal $g$ to be the origin, i.e. $g=0$, for Exercises 1 and 2.2. Note that if the state dynamics are stable, the state reaches $0$ in any case. This is a slightly unrealistic situation for the purposes of simplicity, but we will see more realistic cases later on with $g \\neq 0$\n\n**Stability** \\\\\nThe system is stable, i.e. the output remains finite for any finite initial condition $s_{init}$, if $|D|<1$.\n\n**Control** \\\\\nIn *open-loop control*, $a_t$ is not a function of $s_t$. 
In *closed-loop linear control*, $a_t$ is a linear function of the state $s_t$. Specifically, $a_t$ is the control gain $L_t$ multiplied by $s_t$, i.e. $a_t=L_t s_t$. For now, you will explore these equations, and later on, you will *design* $L_t$ to reach the goal $g$.",
"_____no_output_____"
],
[
"### Coding Exercise 1: Implement state evolution equations\n\n\nImplement the state evolution equations in the class methods as provided below, for the following cases: \\\\\n(a) no control: `def dynamics` \\\\\n(b) open-loop control: `def dynamics_openloop` \\\\\n(c) closed-loop control: `def dynamics_closedloop` \\\\\n\n*Tip: refer to Equation (1) above. The provided code uses the same notation*",
"_____no_output_____"
]
],
[
[
"class LDS:\n def __init__(self, T: int, ini_state: float, noise_var: float):\n self.T = T # time horizon\n self.ini_state = ini_state\n self.noise_var = noise_var\n\n def dynamics(self, D: float):\n s = np.zeros(self.T) # states initialization\n s[0] = self.ini_state\n noise = np.random.normal(0, self.noise_var, self.T)\n\n for t in range(self.T - 1):\n ####################################################################\n ## Insert your code here to fill with the state dynamics equation\n ## without any control input\n ## complete the function and remove\n #raise NotImplementedError(\"Exercise: Please complete 'dynamics'\")\n ####################################################################\n # calculate the state of t+1\n s[t + 1] = D*s[t] + noise[t]\n\n return s\n\n def dynamics_openloop(self, D: float, B: float, a: np.ndarray):\n\n s = np.zeros(self.T) # states initialization\n s[0] = self.ini_state\n noise = np.random.normal(0, self.noise_var, self.T)\n\n for t in range(self.T - 1):\n ####################################################################\n ## Insert your code here to fill with the state dynamics equation\n ## with open-loop control input a[t]\n ## complete the function and remove\n #raise NotImplementedError(\"Please complete 'dynamics_openloop'\")\n ####################################################################\n # calculate the state of t+1\n s[t + 1] = D*s[t] + B*a[t] +noise[t]\n\n return s\n\n def dynamics_closedloop(self, D: float, B: float, L: np.ndarray):\n\n s = np.zeros(self.T) # states initialization\n s[0] = self.ini_state\n noise = np.random.normal(0, self.noise_var, self.T)\n a = np.zeros(self.T - 1)\n\n for t in range(self.T - 1):\n ####################################################################\n ## Insert your code here to fill with the state dynamics equation\n ## with closed-loop control input as a function of control gain L.\n ## complete the function and remove\n #raise NotImplementedError(\"Please complete 
'dynamics_closedloop'\")\n ####################################################################\n # calculate the current action\n a[t] = L[t] * s[t]\n # calculate the next state\n s[t + 1] = D*s[t] + B*a[t] +noise[t]\n\n return s, a\n\n\n# Test your function\ntest_lds_class(LDS)",
"Well Done!\n"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_799ec42e.py)\n\n",
"_____no_output_____"
],
[
"### Interactive Demo 1.1: Explore no control vs. open-loop control vs. closed-loop control\n\nOnce your code above passes the tests, use the interactive demo below to visualize the effects of different kinds of control inputs. \n\n(a) For the no-control case, can you identify two distinct outcomes, depending on the value of D? Why? \n\n(b) The open-loop controller works well--or does it? Run the simulation multiple times and see if there are any problems, especially in challenging (high noise) conditions. \n\n(c) Does the closed-loop controller fare better with the noise? Vary the values of $L$ and find a range where it quickly reaches the goal. \n",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\n#@markdown Play around (attentively) with **`a`** and **`L`** to see the effect on the open-loop controlled and closed-loop controlled state.\n\ndef simulate_lds(D=0.95, L=-0.3, a=-1., B=2., noise_var=0.1,\n T=50, ini_state=2.):\n\n # linear dynamical system\n lds = LDS(T, ini_state, noise_var)\n\n # No control\n s_no_control=lds.dynamics(D)\n\n # Open loop control\n at = np.append(a, np.zeros(T - 1))\n s_open_loop = lds.dynamics_openloop(D, B, at)\n\n # Closed loop control\n Lt = np.zeros(T) + L\n s_closed_loop, a_closed_loop = lds.dynamics_closedloop(D, B, Lt)\n\n plt.figure(figsize=(10, 6))\n plt.plot(s_no_control, 'b', label='No control')\n plt.plot(s_open_loop, 'g', label='Open Loop with a = {}'.format(a))\n plt.plot(s_closed_loop, 'r', label='Closed Loop with L = {}'.format(L))\n plt.plot(np.zeros(T), 'm', label='goal')\n plt.title('LDS State Evolution')\n plt.ylabel('State', fontsize=14)\n plt.xlabel('Time', fontsize=14)\n plt.legend(loc=\"upper right\")\n plt.show()\n\nwidget=interactive(simulate_lds, {'manual': True},\n D=(.85, 1.05, .1),\n L=(-0.6, 0., .15),\n a=(-2., 1., 1.),\n B=(1., 3., 1.),\n noise_var=(0., 0.2, .1),\n T=fixed(50),\n ini_state=(2., 10., 4.))\n\nwidget.children[-2].description='Run Simulation'\nwidget.children[-2].style.button_color='lightgreen'\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_4ae677cb.py)\n\n",
"_____no_output_____"
],
[
"### Interactive Demo 1.2: Exploring the closed-loop setting further \nExecute the cell below to visualize the MSE between the state and goal, as a function of control gain $L$. You should see a U-shaped curve, with a clear minimum MSE. The control gain at which the minimum MSE is reached, is the 'optimal' constant control gain for minimizing MSE, here called the numerical optimum. \n\nA green dashed line is shown $L = -\\frac{D}{B}$ with $D=0.95$ and $B=2$. Consider how Why is this the theoretical optimal control gain for minimizing MSE of the state $s$ to the goal $g=0$? Examine how the states evolve with a constant gain $L$\n$$\n\\begin{eqnarray*}\ns_{t+1} &=& Ds_t + Ba_t + w_t \\\\\n&=& Ds_t + B(Ls_t) + w_t \\\\\n&=& (D+BL)s_t + w_t \\tag{2}\n\\end{eqnarray*}\n$$\n\nNow, let's visualize the evolution of the system as we change the control gain. We will start with the optimal gain (the control gain that gets us the minimum MSE), and then explore over- and under- ambitious values. ",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to visualize MSE between state and goal, as a function of control gain\ndef calculate_plot_mse():\n D, B, noise_var, T, ini_state = 0.95, 2., 0.1, 50, 2.\n control_gain_array = np.linspace(0.1, 0.9, T)\n mse_array = np.zeros(control_gain_array.shape)\n for i in range(len(control_gain_array)):\n lds = LDS(T, ini_state, noise_var)\n L = - np.ones(T) * control_gain_array[i]\n s, a = lds.dynamics_closedloop(D, B, L)\n mse_array[i] = np.sum(s**2)\n\n plt.figure()\n plt.plot(-control_gain_array, mse_array, 'b')\n plt.axvline(x=-D/B, color='g', linestyle='--')\n plt.xlabel(\"control gain (L)\", fontsize=14)\n plt.ylabel(\"MSE between state and goal\" , fontsize=14)\n plt.title(\"MSE vs control gain\", fontsize=20)\n plt.show()\n\n\ncalculate_plot_mse()",
"_____no_output_____"
],
[
"#@markdown Make sure you execute this cell to enable the widget!\n\n#@markdown Explore different values of control gain **`L`** (close to optimal, over- and under- ambitious) \\\\\n\n\ndef simulate_L(L:float=-0.45):\n D, B, noise_var, T, ini_state = 0.95, 2., 0.1, 50, 2.\n lds = LDS(T, ini_state, noise_var)\n # Closed loop control with the numerical optimal control gain\n Lt = np.ones(T) * L\n s_closed_loop_choice, _ = lds.dynamics_closedloop(D, B, Lt)\n # Closed loop control with the theoretical optimal control gain\n L_theory = - D / B * np.ones(T)\n s_closed_loop_theoretical, _ = lds.dynamics_closedloop(D, B, L_theory)\n # Plotting closed loop state evolution with both theoretical and numerical optimal control gains\n plt.figure(figsize=(10, 6))\n plot_vs_time(s_closed_loop_theoretical,\n 'Closed Loop (Theoretical optimal control gain)','b')\n plot_vs_time(s_closed_loop_choice,\n 'Closed Loop (your choice of L = {})'.format(L), 'g',\n goal=np.zeros(T), ylabel=\"State\")\n plt.title('Closed Loop State Evolution')\n plt.show()\n\n\nwidget=interactive(simulate_L, {'manual': True}, L=(-1.05, 0.051, .1))\n\nwidget.children[-2].description='Run Simulation'\nwidget.children[-2].style.button_color='lightgreen'\n\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_a2d58988.py)\n\n",
"_____no_output_____"
],
[
"---\n# Section 2: Designing an optimal control input using a linear quadratic regulator (LQR)",
"_____no_output_____"
]
],
[
[
"# @title Video 2: Linear quadratic regulator (LQR)\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1sz411v7za\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"NZSwDy7wtIs\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"## Section 2.1 Constraints on the system\nNow we will start imposing additional constraints on our system. For example. \nif you explored different values for $s_{init}$ above, you would have seen very large values for $a_t$ in order to get to the mouse in a short amount of time. However, perhaps the design of our jetpack makes it dangerous to use large amounts of fuel in a single timestep. We certainly do not want to explode, so we would like to keep the actions $a_t$ as small as possible while still mantaining good control.\n\nMoreover, in Exercise 1, we had restricted ourselves to a static control gain $L_t \\equiv L$. How would we vary it if we could?\n\nThis leads us to a more principled way of designing the optimal control input.\n### Setting up a cost function \n\nIn a finite-horizon LQR problem, the cost function is defined as: \n\n\\begin{eqnarray}\nJ({\\bf s},{\\bf a}) &=& J_{state}({\\bf s}) + \\rho J_{control}({\\bf a}) \\\\\n &=& \\sum_{t = 0}^{T} (s_{t}-g)^2 + \\rho \\sum_{t=0}^{T-1}a_{t}^2 \\tag{3}\n\\end{eqnarray}\n\nwhere $\\rho$ is the weight on the control effort cost, as compared to the cost of not being at the goal. Here, ${\\bf a} = \\{a_t\\}_{t=0}^{T-1}$, ${\\bf s} = \\{s_t\\}_{t=0}^{T}$. This is a quadratic cost function. In Exercise $2$, we will only explore $g=0$, in which case $J_{state}({\\bf s})$ can also be expressed as $\\sum_{t = 0}^{T} s_{t}^2$. In Exercise $3$, we will explore a non-zero time-varying goal.\n\nThe goal of the LQR problem is to find control ${\\bf a}$ such that $J({\\bf s},{\\bf a})$ is minimized. The goal is then to find the control gain at each time point, i.e.,\n\n$$ \\text{argmin} _{\\{L_t\\}_{t=0}^{T-1}} J({\\bf s},{\\bf a}) \\tag{4} $$ \n\nwhere $a_t = L_t s_t$.",
"_____no_output_____"
],
[
"\n## Section 2.2 Solving LQR\nThe solution to Equation (4), i.e. LQR for a finite time horizon, can be obtained via Dynamic Programming. For details, check out [this lecture by Stephen Boyd](https://stanford.edu/class/ee363/lectures/dlqr.pdf).\n\nFor an infinite time horizon, one can obtain a closed-form solution using Riccati equations, and the solution for the control gain becomes time-invariant, i.e. $L_t \\equiv L$. We will use this in Exercise 4. For details, check out [this other lecture by Stephen Boyd](https://stanford.edu/class/ee363/lectures/dlqr-ss.pdf).\n\nAdditional reference for entire section: \\\\\n[Bertsekas, Dimitri P. \"Dynamic programming and optimal control\". Vol. 1. No. 2. Belmont, MA: Athena scientific, 1995.](http://www.athenasc.com/dpbook.html)\n",
"_____no_output_____"
],
[
"### Coding Exercise 2.2: Implement the cost function\nThe cost function $J_{control}({\\bf s}, {\\bf a})$ can be divided into two parts: $J_{state}({\\bf s})$ and $J_{control}({\\bf a})$. \n\nCode up these two parts in the class methods `def calculate_J_state` and `def calculate_J_control` in the following helper class for LQR. \n",
"_____no_output_____"
]
],
[
[
"class LQR(LDS):\n def __init__(self, T, ini_state, noise_var):\n super().__init__(T, ini_state, noise_var)\n self.goal = np.zeros(T) # The class LQR only supports g=0\n\n def control_gain_LQR(self, D, B, rho):\n P = np.zeros(self.T) # Dynamic programming variable\n L = np.zeros(self.T - 1) # control gain\n P[-1] = 1\n\n for t in range(self.T - 1):\n P_t_1 = P[self.T - t - 1]\n P[self.T - t-2] = (1 + P_t_1 * D**2 - D * P_t_1 * B / (\n rho + P_t_1 * B) * B**2 * P_t_1 * D)\n\n L[self.T - t-2] = - (1 / (rho + P_t_1 * B**2) * B * P_t_1 * D)\n return L\n\n def calculate_J_state(self, s:np.ndarray):\n ########################################################################\n ## Insert your code here to calculate J_state(s) (see Eq. 3)\n ## complete the function and remove\n #raise NotImplementedError(\"Please complete 'calculate_J_state'\")\n ########################################################################\n # calculate the state\n J_state = np.sum((s-self.goal)**2)\n\n return J_state\n\n def calculate_J_control(self, a:np.ndarray):\n ########################################################################\n ## Insert your code here to calculate J_control(a) (see Eq. 3).\n ## complete the function and remove\n #raise NotImplementedError(\"Please complete 'calculate_J_control'\")\n ########################################################################\n # calculate the control\n J_control = np.sum(a**2)\n\n return J_control\n\n\n# Test class\ntest_lqr_class(LQR)",
"Well Done!\n"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_06c558e2.py)\n\n",
"_____no_output_____"
],
[
"### Interactive Demo 2.2: LQR to the origin \n\nIn this exercise, we will use your new LQR controller to track a static goal at $g=0$. Here, we will explore how varying $\\rho$ affects its actions by\\\\\n\n1. Using Equation 3, find a value for $\\rho$ that will get you the same cost and control gain as Exercise 1.\n2. Pick a larger value for $\\rho$ and see the effect on the action.\n3. Try increasing the rho to 2. What do you notice? \\\\\n4. For different values of $\\rho$, how does the control gain vary? ",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef simulate_rho(rho=1.):\n D, B, T, ini_state, noise_var = 0.9, 2., 50, 2., .1 # state parameter\n lqr = LQR(T, ini_state, noise_var)\n L = lqr.control_gain_LQR(D, B, rho)\n s_lqr, a_lqr = lqr.dynamics_closedloop(D, B, L)\n\n plt.figure(figsize=(14, 4))\n plt.suptitle('LQR Control for rho = {}'.format(rho), y=1.05)\n\n plt.subplot(1, 3, 1)\n plot_vs_time(s_lqr,'State evolution','b',goal=np.zeros(T))\n plt.ylabel('State $s_t$')\n\n plt.subplot(1, 3, 2)\n plot_vs_time(a_lqr,'LQR Action','b')\n plt.ylabel('Action $a_t$')\n\n plt.subplot(1, 3, 3)\n plot_vs_time(L,'Control Gain','b')\n plt.ylabel('Control Gain $L_t$')\n\n plt.tight_layout()\n plt.show()\n\nwidget=interactive(simulate_rho, {'manual': True}, rho=(0., 2., 0.5))\n\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\n\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_f5b9225d.py)\n\n",
"_____no_output_____"
],
[
"## Section 2.3: The tradeoff between state cost and control cost\n\nIn Exercise 2.1, you implemented code to calculate for $J_{state}$ and $J_{control}$ in the class methods for the class LQR. \n\nWe will now plot them against each other for varying values of $\\rho$ to explore the tradeoff between state cost and control cost.",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to visualize the tradeoff between state and control cost\ndef calculate_plot_costs():\n D, B, noise_var, T, ini_state = 0.9, 2., 0.1, 50, 2.\n rho_array = np.linspace(0.2, 40, 100)\n J_state = np.zeros(rho_array.shape)\n J_control = np.zeros(rho_array.shape)\n for i in np.arange(len(rho_array)):\n lqr = LQR(T, ini_state, noise_var)\n L = lqr.control_gain_LQR(D, B, rho_array[i])\n s_lqr, a_lqr = lqr.dynamics_closedloop(D, B, L)\n J_state[i] = lqr.calculate_J_state(s_lqr)\n J_control[i] = lqr.calculate_J_control(a_lqr)\n\n fig = plt.figure(figsize=(6, 6))\n plt.plot(J_state, J_control, '.b')\n plt.xlabel(\"$J_{state} = \\sum_{t = 0}^{T} (s_{t}-g)^2$\", fontsize=14)\n plt.ylabel(\"$J_{control} = \\sum_{t=0}^{T-1}a_{t}^2$\" , fontsize=14)\n plt.title(\"Error vs control effort\", fontsize=20)\n plt.show()\n\n\ncalculate_plot_costs()",
"_____no_output_____"
]
],
[
[
"You should notice the bottom half of a 'C' shaped curve, forming the tradeoff between the state cost and the control cost under optimal linear control.\nFor a desired value of the state cost, we cannot reach a lower control cost than the curve in the above plot. Similarly, for a desired value of the control cost, we must accept that amount of state cost. For example, if you know that you have a limited amount of fuel, which determines your maximum control cost to be $J_{control}^{max}$. \n\nYou will be able to show that you will not be able to track your state with a higher accuracy than the corresponding $J_{state}$ as given by the graph above. This is thus an important curve when designing a system and exploring its control.\n",
"_____no_output_____"
],
[
"---\n# Section 3: LQR for tracking a time-varying goal",
"_____no_output_____"
]
],
[
[
"# @title Video 3: Tracking a moving goal\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1up4y1S7gg\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"HOoqM7kBWSY\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"In a more realistic situation, the mouse would move around constantly. Suppose you were able to predict the movement of the mouse as it bounces from one place to another. This becomes your goal trajectory $g_t$.\n\nWhen the target state, denoted as $g_t$, is not $0$, the cost function becomes\n$$ J({\\bf a}) = \\sum_{t = 0}^{T} (s_{t}- g_t) ^2 + \\rho \\sum_{t=0}^{T-1}(a_{t}-\\bar a_t)^2$$\nHere, $\\bar a_t$ is the desired action based on the goal trajectory. In other words, the controller considers the goal for the next time step, and designs a preliminary control action that gets the state at the next time step to the desired goal. Specifically, without taking into account noise $w_t$, we would like to design $\\bar a_t$ such that $s_{t+1}=g_{t+1}$. Thus, from Equation $(1)$,\n\n\\begin{eqnarray*}\ng_{t+1} &=& Ds_t + B \\bar a_t\\\\\n\\bar a_{t} &=& \\frac{- Ds_t + g_{t+1}}{B}\\\\\n\\end{eqnarray*}\n\nThe final control action $a_t$ is produced by adding this desired action $\\bar a_t$ with the term with the control gain $L_t(s_t - g_t)$.",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to include class\n#@markdown for LQR control to desired time-varying goal\n\nclass LQR_tracking(LQR):\n def __init__(self, T, ini_state, noise_var, goal):\n super().__init__(T, ini_state, noise_var)\n self.goal = goal\n\n def dynamics_tracking(self, D, B, L):\n\n s = np.zeros(self.T) # states intialization\n s[0] = self.ini_state\n\n noise = np.random.normal(0, self.noise_var, self.T)\n\n a = np.zeros(self.T) # control intialization\n a_bar = np.zeros(self.T)\n for t in range(self.T - 1):\n a_bar[t] = ( - D * s[t] + self.goal[t + 1]) / B\n a[t] = L[t] * (s[t] - self.goal[t]) + a_bar[t]\n s[t + 1] = D * s[t] + B * a[t] + noise[t]\n\n return s, a, a_bar\n\n def calculate_J_state(self,s):\n J_state = np.sum((s-self.g)**2)\n return J_state\n\n def calculate_J_control(self, a, a_bar):\n J_control = np.sum((a-a_bar)**2)\n return J_control",
"_____no_output_____"
]
],
[
[
"### Interactive Demo 3: LQR control to desired time-varying goal\nUse the demo below to explore how LQR tracks a time-varying goal. \nStarting with the sinusoidal goal function `sin`, investigate how the system reacts with different values of $\rho$ and process noise variance. Next, explore other time-varying goals, such as a step function and ramp.",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef simulate_tracking(rho=20., noise_var=0.1, goal_func='sin'):\n D, B, T, ini_state = 0.9, 2., 100, 0.\n if goal_func == 'sin':\n goal = np.sin(np.arange(T) * 2 * np.pi * 5 / T)\n elif goal_func == 'step':\n goal = np.zeros(T)\n goal[int(T / 3):] = 1.\n elif goal_func == 'ramp':\n goal = np.zeros(T)\n goal[int(T / 3):] = np.arange(T - int(T / 3)) / (T - int(T / 3))\n\n lqr_time = LQR_tracking(T, ini_state, noise_var, goal)\n L = lqr_time.control_gain_LQR(D, B, rho)\n s_lqr_time, a_lqr_time, a_bar_lqr_time = lqr_time.dynamics_tracking(D, B, L)\n\n plt.figure(figsize=(13, 5))\n plt.suptitle('LQR Control for time-varying goal', y=1.05)\n plt.subplot(1, 2, 1)\n plot_vs_time(s_lqr_time,'State evolution $s_t$','b',goal, ylabel=\"State\")\n plt.subplot(1, 2, 2)\n plot_vs_time(a_lqr_time, 'Action $a_t$', 'b', ylabel=\"Action\")\n plt.show()\n\n\nwidget=interactive(simulate_tracking, {'manual': True},\n rho=(0., 40., 10.),\n noise_var=(0., 1., .2),\n goal_func=['sin', 'step', 'ramp']\n )\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\n\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_eb1414c8.py)\n\n",
"_____no_output_____"
],
[
"---\n# Section 4: Control of an partially observed state using a Linear Quadratic Gaussian (LQG) controller\n",
"_____no_output_____"
],
[
"## Section 4.1 Introducing the LQG Controller",
"_____no_output_____"
]
],
[
[
"# @title Video 4: Linear Quadratic Gaussian (LQG) Control\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1xZ4y1u73B\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"c_D7iDLT_bw\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"In practice, the controller does not have full access to the state. For example, your jet pack in space may be controlled by Mission Control back on earth! In this case, noisy measurements $m_t$ of the state $s_t$ are taken via radar, and the controller needs to (1) estimate the true state, and (2) design an action based on this estimate. \n\nFortunately, the separation principle tells us that it is optimal to do (1) and (2) separately. This makes our problem much easier, since we already know how to do each step. \n\n1) *State Estimation* \nCan we recover the state from the measurement? \nFrom yesterday's lecture, it is known that the states $\hat{s}_t$ can be estimated from the measurements $m_t$ using the __Kalman filter__. \n\n2) *Design Action* \nIn Sections 2 and 3 above, we just learnt about the LQR controller which designs an action based on the state. The separation principle tells us that it is sufficient to replace the use of the state in LQR with the *estimated* state, i.e.\n\n$$a_t = L_t \hat s_t$$\n\nThe state dynamics will then be:\n$$s_{t+1} = D s_t + B a_t + w_t$$\nwhere $w_t$ is the process noise (proc_noise), and the observation / measurement is:\n$$ y_t = C s_t + v_t$$ \nwith $v_t$ being the measurement noise (meas_noise).\n\nThe combination of (1) state estimation and (2) action design using LQR is known as a **linear quadratic gaussian (LQG)**. Yesterday, you completed the code for Kalman filter. Based on that, you will code up the LQG controller. For these exercises, we will return to using the goal $g=0$, as in Section 2.\n",
"_____no_output_____"
],
[
"### Interactive Demo 4.1: The Kalman filter in conjunction with a linear closed-loop controller (LQG Control)\nIn the `MyKalmanFilter` class, the method `filter_control` implements filtering in closed-loop feedback. It is a combination of generating samples (states $s_t$) and filtering (generating state estimates $\hat s_t$), as you have seen in yesterday's tutorial. The only difference from yesterday is that today's Kalman filter is in closed loop with the controller. Thus, each $s_{t+1}$ gets an input $a_t$, which itself depends on the state estimate of the last time step $\hat s_t$.\n\nBelow you find the code snippets for the Kalman filter in closed loop (`MyKalmanFilter`) class that provide you insight into action update (`control_policy_LQG`) and state estimation (`state_dynamics_LQG`). Please feel free to inspect the helper functions and classes for the details.\n",
"_____no_output_____"
],
[
"You should have seen the next cell containing the `MyKalmanFilter` class yesterday, with the exception of the controller acting on the state estimate in feedback, using the methods/equations you will find below.",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to include MyKalmanFilter class\n\nclass MyKalmanFilter():\n def __init__(self, n_dim_state, n_dim_obs, transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance, control_matrices):\n \"\"\"\n @param n_dim_state: dimension of the latent variables\n @param n_dim_obs: dimension of the observed variables\n @param transition_matrices: D\n @param transition_covariance: process noise\n @param observation_matrices: C\n @param observation_covariance: measurement noise\n @param initial_state_mean: initial state estimate\n @param initial_state_covariance: initial estimate on state variance\n @param control_matrices: B\n \"\"\"\n self.n_dim_state = n_dim_state\n self.n_dim_obs = n_dim_obs\n self.transition_matrices = transition_matrices\n self.transition_covariance = transition_covariance\n self.observation_matrices = observation_matrices\n self.observation_covariance = observation_covariance\n self.initial_state_mean = initial_state_mean\n self.initial_state_covariance = initial_state_covariance\n\n self.control_matrices = control_matrices\n\n def filter_control(self, n_timesteps, control_gain, use_myfilter=True):\n \"\"\"\n Method that performs Kalman filtering with a controller in feedback\n @param n_timesteps: length of the data sample\n @param control_gain: a numpy array whose dimension is [n_timesteps, self.n_dim_state]\n @output: filtered_state_means: a numpy array whose dimension is [n_timesteps, self.n_dim_state]\n @output: filtered_state_covariances: a numpy array whose dimension is [n_timesteps, self.n_dim_state, self.n_dim_state]\n @output: latent_state: a numpy array whose dimension is [n_timesteps, self.n_dim_state]\n @output: observed_state: a numpy array whose dimension is [n_timesteps, self.n_dim_obs]\n @output: control: a numpy array whose dimension is [n_timesteps, self.n_dim_state]\n \"\"\"\n\n # validate inputs\n # assert observed_dim == 
self.n_dim_obs\n\n n_example = n_timesteps\n observed_dim = self.n_dim_obs\n latent_state = []\n observed_state = []\n control = []\n\n current_latent_state = self.initial_state_mean #initial_state\n control.append(self.initial_state_mean)\n latent_state.append(current_latent_state)\n observed_state.append(np.dot(self.observation_matrices, current_latent_state) +\n np.random.multivariate_normal(np.zeros(self.n_dim_obs), self.observation_covariance))\n\n\n\n # create holders for outputs\n filtered_state_means = np.zeros([n_example, self.n_dim_state])\n filtered_state_covariances = np.zeros([n_example, self.n_dim_state, self.n_dim_state])\n\n\n if use_myfilter:\n # the first state mean and state covar is the initial expectation\n filtered_state_means[0] = self.initial_state_mean\n filtered_state_covariances[0] = self.initial_state_covariance\n\n # initialize internal variables\n current_state_mean = self.initial_state_mean.copy()\n current_state_covar = self.initial_state_covariance.copy()\n self.p_n_list = np.zeros((n_example, self.n_dim_obs, self.n_dim_obs))\n\n for i in range(1, n_example):\n ## Use the code in Exercise 4.1 to get the current action\n current_action = control_policy_LQG(self,current_state_mean,control_gain[i])\n control.append(current_action)\n\n\n ## Use the code in Exercise 4.1 to update the state\n current_latent_state = state_dynamics_LQG(self,current_latent_state, current_action)\n latent_state.append(current_latent_state)\n\n # use observation_matrices and observation_covariance to calculate next observed state\n observed_state.append(np.dot(self.observation_matrices, current_latent_state\n ) + np.random.multivariate_normal(np.zeros(self.n_dim_obs), self.observation_covariance))\n\n current_observed_data = observed_state[-1]\n\n\n # run a single step forward filter\n # prediction step\n\n predicted_state_mean = np.dot(self.transition_matrices, current_state_mean\n ) + np.dot(self.control_matrices, current_action)\n predicted_state_cov = 
np.matmul(np.matmul(self.transition_matrices, current_state_covar),\n np.transpose(self.transition_matrices)) + self.transition_covariance\n # observation step\n innovation = current_observed_data - np.dot(self.observation_matrices, predicted_state_mean)\n innovation_covariance = np.matmul(np.matmul(self.observation_matrices, predicted_state_cov),\n np.transpose(self.observation_matrices)) + self.observation_covariance\n # update step\n\n\n kalman_gain = np.matmul(np.matmul(predicted_state_cov, np.transpose(self.observation_matrices)),\n np.linalg.inv(innovation_covariance))\n current_state_mean = predicted_state_mean + np.dot(kalman_gain, innovation)\n current_state_covar = np.matmul((np.eye(current_state_covar.shape[0]) -\n np.matmul(kalman_gain, self.observation_matrices)),\n predicted_state_cov)\n # populate holders\n filtered_state_means[i, :] = current_state_mean\n filtered_state_covariances[i, :, :] = current_state_covar\n self.p_n_list[i, :, :] = predicted_state_cov\n # self.p_n_list[i-1, :, :] = predicted_state_cov\n # new\n # self.p_n_list[-1, :, :] = np.matmul(np.matmul(self.transition_matrices, filtered_state_covariances[-1,:,:]),\n # np.linalg.inv(self.transition_matrices)) + self.transition_covariance\n\n# else:\n# #################################################################################\n# # below: this is an alternative if you do not have an implementation of filtering\n# kf = KalmanFilter(n_dim_state=self.n_dim_state, n_dim_obs=self.n_dim_obs)\n# need_params = ['transition_matrices', 'observation_matrices', 'transition_covariance',\n# 'observation_covariance', 'initial_state_mean', 'initial_state_covariance']\n# for param in need_params:\n# setattr(kf, param, getattr(self, param))\n# filtered_state_means, filtered_state_covariances = kf.filter(X)\n# #################################################################################\n\n filtered_state_means = np.squeeze(np.array(filtered_state_means))\n filtered_state_covariances = 
np.squeeze(np.array(filtered_state_covariances))\n latent_state = np.squeeze(np.array(latent_state))\n observed_state = np.squeeze(np.array(observed_state))\n control = np.squeeze(np.array(control))\n\n\n return filtered_state_means, filtered_state_covariances, latent_state, observed_state, control\n\n def plot_state_vs_time(self, n_timesteps, control_gain, title, use_myfilter=True, goal=None):\n filtered_state_means_impl, filtered_state_covariances_impl, latent, measurement, control = self.filter_control(\n n_timesteps, control_gain)\n\n fig = plt.figure(figsize=(12, 4))\n plt.suptitle(title, y=1.05)\n gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2])\n\n ax0 = plt.subplot(gs[0])\n ax0.plot(latent,filtered_state_means_impl, 'b.')\n ax0.set_xlabel('Latent State')\n ax0.set_ylabel('Estimated State')\n ax0.set_aspect('equal')\n\n ax1 = plt.subplot(gs[1])\n ax1.plot(latent, 'b', label = 'Latent State')\n ax1.plot(filtered_state_means_impl, 'r', label = 'Estimated State')\n if goal is not None:\n ax1.plot(goal, 'm', label = 'goal')\n ax1.set_xlabel('Time')\n ax1.set_ylabel('State')\n ax1.legend(loc=\"upper right\")\n plt.tight_layout()\n plt.show()",
"_____no_output_____"
],
[
"# inspect the 'control_policy_LQG' and 'state_dynamics_LQG' methods:\n\ndef control_policy_LQG(self, mean_estimated_state, control_gain):\n current_action = control_gain * mean_estimated_state\n return current_action\n\ndef state_dynamics_LQG(self, current_latent_state, current_action):\n\n current_latent_state = np.dot(self.transition_matrices, current_latent_state)\\\n + np.dot(self.control_matrices, current_action)\\\n + np.random.multivariate_normal(np.zeros(self.n_dim_state),\n self.transition_covariance)\n return current_latent_state",
"_____no_output_____"
]
],
[
[
"Take a look at the helper code for the `MyKalmanFilter` class above. In the following exercises, we will use the same notation that we have been using in this tutorial; adapter code has been provided to convert it into the representation `MyKalmanFilter` expects.\n\nUse the interactive demo below to refresh your memory of how a Kalman filter estimates state. `C` scales the observation matrix.",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef simulate_kf_no_control(D=0.9, B=2., C=1., L=0., T=50, ini_state=5,\n proc_noise = 0.1, meas_noise = 0.2):\n\n control_gain = np.ones(T) * L\n\n # Format the above variables into a format acccepted by the Kalman Filter\n n_dim_state = 1\n n_dim_obs = 1\n n_timesteps = T\n\n transition_matrices = np.eye(n_dim_state) * D\n transition_covariance = np.eye(n_dim_obs) * proc_noise # process noise\n observation_matrices = np.eye(n_dim_state) * C\n observation_covariance = np.eye(n_dim_obs) * meas_noise\n initial_state_mean = np.ones(n_dim_state) * ini_state\n initial_state_covariance = np.eye(n_dim_state) * .01\n control_matrices = np.eye(n_dim_state) * B\n\n my_kf = MyKalmanFilter(n_dim_state, n_dim_obs, transition_matrices,\n transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean,\n initial_state_covariance, control_matrices)\n\n my_kf.plot_state_vs_time(n_timesteps, control_gain,\n 'State estimation with KF (no control input)')\n\n\nwidget=interactive(simulate_kf_no_control, {'manual': True},\n D=fixed(.95),\n B=fixed(2.),\n C=(0., 3., 1.),\n proc_noise=(0., 1., .1),\n meas_noise=(0.1, 1., .1),\n T=fixed(50),\n L=fixed(0),\n ini_state=fixed(5.))\n\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_5ecbeb0c.py)\n\n",
"_____no_output_____"
],
[
"### Interactive Demo 4.2: LQG controller output with varying control gains\n\nNow let's implement the Kalman filter with closed-loop feedback with the controller. We will first use an arbitary control gain and a fixed value for measurement noise. We will then use the control gain from the LQR for optimal performance, with varying values for $\\rho$.\n\n(a) Visualize the system dynamics $s_t$ in closed-loop control with an arbitrary constant control gain. Vary this control gain.\n\n(b) Vary $\\rho$ to visualize the output of the optimal LQG controller. Here, we will use an optimal *constant* control gain, which is optimal in the case of an infinite time horizon (get to the goal and stay there forever).",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef simulate_kf_with_control(D=0.9, B=2., C=1., L=-0.1, T=50, ini_state=5,\n proc_noise = 0.1, meas_noise = 0.2):\n\n control_gain = np.ones(T)*L\n\n # Format the above variables into a format acccepted by the Kalman Filter\n n_dim_state = 1\n n_dim_obs = 1\n n_timesteps = T\n\n transition_matrices = np.eye(n_dim_state) * D\n transition_covariance = np.eye(n_dim_obs) * proc_noise # process noise\n observation_matrices = np.eye(n_dim_state) * C\n observation_covariance = np.eye(n_dim_obs) * meas_noise\n initial_state_mean = np.ones(n_dim_state) * ini_state\n initial_state_covariance = np.eye(n_dim_state) * .01\n control_matrices = np.eye(n_dim_state) * B\n\n my_kf = MyKalmanFilter(n_dim_state, n_dim_obs, transition_matrices,\n transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean,\n initial_state_covariance, control_matrices)\n\n my_kf.plot_state_vs_time(n_timesteps, control_gain, goal = np.zeros(T),\n title='State estimation with KF (controller gain = {})'.format(L))\n\n\nwidget=interactive(simulate_kf_with_control, {'manual': True},\n D=fixed(.9),\n B=fixed(2.),\n C=(0., 3., 1.),\n proc_noise=(0., 1., .1),\n meas_noise=(0.1, 1., .1),\n T=fixed(50),\n L=(-0.5, 0., .1),\n ini_state=fixed(5.))\n\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\ncontrols = HBox(widget.children[:-1], layout=Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]))",
"_____no_output_____"
]
],
[
[
"### Interactive Demo 4.3: LQG with varying control effort costs\n\nNow let's see the performance of the LQG controller. We will use an LQG controller gain, where the control gain is from a system with an infinite-horizon. In this case, the optimal control gain turns out to be a constant. \n\nVary the value of $\\rho$ from $0$ to large values, to see the effect on the state.",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to include helper function for LQG\n\nclass LQG(MyKalmanFilter, LQR):\n def __init__(self, T, n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance, control_matrices):\n MyKalmanFilter.__init__(self,n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance,\n observation_matrices,observation_covariance,\n initial_state_mean, initial_state_covariance, control_matrices)\n LQR.__init__(self,T, initial_state_mean, transition_covariance)\n\n def control_gain_LQR_infinite(self, rho):\n control_gain_LQR_finite = self.control_gain_LQR(self.transition_matrices, self.control_matrices, rho)\n return control_gain_LQR_finite[0]",
"_____no_output_____"
],
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef simulate_kf_with_lqg(D=0.9, B=2., C=1., T=50, ini_state=5,\n proc_noise=0.1, meas_noise=0.2, rho=1.):\n\n # Format the above variables into a format acccepted by the Kalman Filter\n n_dim_state = 1\n n_dim_obs = 1\n n_timesteps = T\n\n transition_matrices = np.eye(n_dim_state) * D\n transition_covariance = np.eye(n_dim_obs) * proc_noise # process noise\n observation_matrices = np.eye(n_dim_state) * C\n observation_covariance = np.eye(n_dim_obs) * meas_noise\n initial_state_mean = np.ones(n_dim_state) * ini_state\n initial_state_covariance = np.eye(n_dim_state) * .01\n control_matrices = np.eye(n_dim_state) * B\n\n my_kf = MyKalmanFilter(n_dim_state, n_dim_obs, transition_matrices,\n transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean,\n initial_state_covariance, control_matrices)\n\n lqg = LQG(n_timesteps, n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance,\n control_matrices)\n\n control_gain_lqg = lqg.control_gain_LQR_infinite(rho) * np.ones(n_timesteps)\n\n lqg.plot_state_vs_time(n_timesteps, control_gain_lqg, goal = np.zeros(T),\n title='State estimation with KF (LQG controller)')\n\nwidget=interactive(simulate_kf_with_lqg, {'manual': True},\n D = fixed(.9),\n B = fixed(2.),\n C = fixed(1.),\n proc_noise = fixed(.1),\n meas_noise = fixed(.2),\n T = fixed(50),\n ini_state = fixed(5.),\n rho=(0., 5., 1.))\n\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\ncontrols = HBox(widget.children[:-1], layout = Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]));",
"_____no_output_____"
]
],
[
[
"### Interactive Demo 4.4: How does the process noise and the measurement noise influence the controlled state and desired action?\n\nProcess noise $w_t$ (proc_noise) and measurement noise $v_t$ (meas_noise) have very different effects on the controlled state. \n\n(a) To visualize this, play with the sliders to get an intuition for how process noise and measurement noise influences the controlled state. How are these two sources of noise different?\n\n(b) Next, for varying levels of process noise and measurement noise (note that the control policy is exactly the same for all these values), plot the mean squared error (MSE) between state and the goal, as well as the control cost. What do you notice?\n",
"_____no_output_____"
]
],
[
[
"#@markdown Make sure you execute this cell to enable the widget!\n\ndef lqg_slider(D=0.9, B=2., C=1., T=50, ini_state=5,\n proc_noise=2.9, meas_noise=0., rho=1.):\n\n # Format the above variables into a format acccepted by the Kalman Filter\n # Format the above variables into a format acccepted by the Kalman Filter\n n_dim_state = 1\n n_dim_obs = 1\n n_timesteps = T\n\n transition_matrices = np.eye(n_dim_state) * D\n transition_covariance = np.eye(n_dim_obs) * proc_noise # process noise\n observation_matrices = np.eye(n_dim_state) * C\n observation_covariance = np.eye(n_dim_obs) * meas_noise\n initial_state_mean = np.ones(n_dim_state) * ini_state\n initial_state_covariance = np.eye(n_dim_state) * .01\n control_matrices = np.eye(n_dim_state) * B\n rho = 1\n\n lqg = LQG(n_timesteps, n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance, control_matrices)\n\n control_gain_lqg = lqg.control_gain_LQR_infinite(rho) * np.ones(n_timesteps)\n lqg.plot_state_vs_time(n_timesteps, control_gain_lqg, goal = np.zeros(n_timesteps),\n title='State estimation with KF (LQG controller)')\n\nwidget=interactive(lqg_slider, {'manual': True},\n D = fixed(.9),\n B = fixed(2.),\n C = fixed(1.),\n proc_noise = (0., 3., .1),\n meas_noise = (0.1, 3., .1),\n T = fixed(50),\n ini_state = fixed(5.),\n rho=fixed(1.))\n\nwidget.children[-2].description = 'Run Simulation'\nwidget.children[-2].style.button_color = 'lightgreen'\ncontrols = HBox(widget.children[:-1], layout = Layout(flex_flow='row wrap'))\noutput = widget.children[-1]\ndisplay(VBox([controls, output]));",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_baaf321d.py)\n\n",
"_____no_output_____"
],
[
"## Section 4.2 Noise effects on the LQG\n\nWe can now quantify how the state cost and control costs change when we change the process and measurement noise levels. To do so, we will run many simulations, stepping through levels of process and measurement noise, tracking MSE and cost of control for each. Run the cell below to perform these simulations and plot them. How do you interpret the results?",
"_____no_output_____"
]
],
[
[
"#@markdown Execute this cell to to quantify the dependence of state and control\n#@markdown cost on process and measurement noise (takes ~20 seconds)\n\nD = 0.9 # state parameter\nB = 2 # control parameter\nC = 1 # measurement parameter\nnoise_var = 0.1\nT = 200 # time horizon\nini_state = 5 # initial state\nprocess_noise_var = 0.1 # process noise\nmeasurement_noise_var = 0.2 # measurement noise\nrho = 1\n\n# Format the above variables into a format acccepted by the Kalman Filter\nn_dim_state = 1\nn_dim_obs = 1\nn_timesteps = T\n\ntransition_matrices = np.eye(n_dim_state) * D\ntransition_covariance = np.eye(n_dim_obs) * noise_var # process noise\nobservation_matrices = np.eye(n_dim_state) * C\nobservation_covariance = np.eye(n_dim_obs) * measurement_noise_var\ninitial_state_mean = np.ones(n_dim_state) * ini_state\ninitial_state_covariance = np.eye(n_dim_state) * .01\ncontrol_matrices = np.eye(n_dim_state) * B\n\n# Implement LQG control over n_iter iterations, and record the MSE between state and goal\nMSE_array_N_meas = []\nMSE_array_N_proc = []\nJcontrol_array_N_meas = []\nJcontrol_array_N_proc = []\nn_iter = 10\nmeas_noise_array = np.linspace(0,3,20)\nproc_noise_array = np.linspace(0.1,3,20)\n\nfor i in range(n_iter):\n MSE_array = np.zeros(proc_noise_array.shape)\n Jcontrol_array = np.zeros(meas_noise_array.shape)\n for i in range(len(proc_noise_array)):\n\n transition_covariance = np.eye(n_dim_obs) * proc_noise_array[i]\n observation_covariance = np.eye(n_dim_obs) * measurement_noise_var\n lqg = LQG(n_timesteps, n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance, control_matrices)\n\n control_gain_lqg = lqg.control_gain_LQR_infinite(rho) * np.ones(n_timesteps) # Get the control gain\n filtered_state_means_impl, filtered_state_covariances_impl, latent, measurement, control = lqg.filter_control(\n n_timesteps, control_gain_lqg)\n MSE_array[i] = 
lqg.calculate_J_state(latent)\n Jcontrol_array[i] = lqg.calculate_J_control(control)\n\n MSE_array_N_proc.append(MSE_array)\n Jcontrol_array_N_proc.append(Jcontrol_array)\n\n\n MSE_array = np.zeros(meas_noise_array.shape)\n Jcontrol_array = np.zeros(meas_noise_array.shape)\n for i in range(len(meas_noise_array)):\n\n observation_covariance = np.eye(n_dim_obs) * meas_noise_array[i]\n transition_covariance = np.eye(n_dim_obs) * noise_var\n lqg = LQG(n_timesteps, n_dim_state, n_dim_obs,\n transition_matrices, transition_covariance, observation_matrices,\n observation_covariance, initial_state_mean, initial_state_covariance, control_matrices)\n\n control_gain_lqg = lqg.control_gain_LQR_infinite(rho) * np.ones(n_timesteps) # Get the control gain\n filtered_state_means_impl, filtered_state_covariances_impl, latent, measurement, control = lqg.filter_control(\n n_timesteps, control_gain_lqg)\n MSE_array[i] = lqg.calculate_J_state(latent)\n Jcontrol_array[i] = lqg.calculate_J_control(control)\n\n MSE_array_N_meas.append(MSE_array)\n Jcontrol_array_N_meas.append(Jcontrol_array)\n\nMSE_array_proc_mean = np.mean(np.array(MSE_array_N_proc), axis = 0)\nMSE_array_proc_std = np.std(np.array(MSE_array_N_proc), axis = 0)\nMSE_array_meas_mean = np.mean(np.array(MSE_array_N_meas), axis = 0)\nMSE_array_meas_std = np.std(np.array(MSE_array_N_meas), axis = 0)\n\nJcontrol_array_proc_mean = np.mean(np.array(Jcontrol_array_N_proc), axis = 0)\nJcontrol_array_proc_std = np.std(np.array(Jcontrol_array_N_proc), axis = 0)\nJcontrol_array_meas_mean = np.mean(np.array(Jcontrol_array_N_meas), axis = 0)\nJcontrol_array_meas_std = np.std(np.array(Jcontrol_array_N_meas), axis = 0)\n\n# Visualize the quantification\nf, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 8))\n\naxs[0,0].plot(proc_noise_array, MSE_array_proc_mean, 'r-')\naxs[0,0].fill_between(proc_noise_array, MSE_array_proc_mean+MSE_array_proc_std,\n MSE_array_proc_mean-MSE_array_proc_std, facecolor='tab:gray', 
alpha=0.5)\naxs[0,0].set_title('Effect of process noise')\naxs[0,0].set_ylabel('State Cost (MSE between state and goal)')\n\naxs[0,1].plot(meas_noise_array, MSE_array_meas_mean, 'r-')\naxs[0,1].fill_between(meas_noise_array, MSE_array_meas_mean+MSE_array_meas_std,\n MSE_array_meas_mean-MSE_array_meas_std, facecolor='tab:gray', alpha=0.5)\naxs[0,1].set_title('Effect of measurement noise')\n\naxs[1,0].plot(proc_noise_array, Jcontrol_array_proc_mean, 'r-')\naxs[1,0].fill_between(proc_noise_array, Jcontrol_array_proc_mean+Jcontrol_array_proc_std,\n Jcontrol_array_proc_mean-Jcontrol_array_proc_std, facecolor='tab:gray', alpha=0.5)\naxs[1,0].set_xlabel('Process Noise')\naxs[1,0].set_ylabel('Cost of Control')\n\naxs[1,1].plot(meas_noise_array, Jcontrol_array_meas_mean, 'r-')\naxs[1,1].fill_between(meas_noise_array, Jcontrol_array_meas_mean+Jcontrol_array_meas_std,\n Jcontrol_array_meas_mean-Jcontrol_array_meas_std, facecolor='tab:gray', alpha=0.5)\naxs[1,1].set_xlabel('Measurement Noise')\nplt.show()",
"_____no_output_____"
]
],
[
[
"[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D3_OptimalControl/solutions/W3D3_Tutorial2_Solution_b6faccff.py)\n\n",
"_____no_output_____"
],
[
"---\n# Summary\n\nIn this tutorial, you have extended the idea of optimal policy to the Astrocat example. You have learned about how to design an optimal controller with full observation of the state (linear quadratic regulator - LQR), and under partial observability of the state (linear quadratic gaussian - LQG).",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e79d1ffa8decf44ef49d3971a0ecdaaa99e61dbc | 434,145 | ipynb | Jupyter Notebook | code/concept.ipynb | dandexler/spotify-exercise | e0b17d42890d1cbc69d0ca7c840851d8204eedf4 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | code/concept.ipynb | dandexler/spotify-exercise | e0b17d42890d1cbc69d0ca7c840851d8204eedf4 | [
"MIT",
"BSD-3-Clause"
] | 6 | 2020-02-25T20:10:50.000Z | 2020-03-03T05:15:32.000Z | code/concept.ipynb | dandexler/spotiPylot | e0b17d42890d1cbc69d0ca7c840851d8204eedf4 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | 194.683857 | 33,872 | 0.868247 | [
[
[
"# spotiPylot\n## The collaborative playlist generator.\n### By: David Andexler\nProof-of-Concept\nDescription: A Python application that allows users to generate collaborative playlists based on music each participant is likely to enjoy. In the current iteration, best results will be obtained with fewer than five participants.\n\nOverview of concept: Accept three playlists. These playlists are, in theory, representative of independent users with distinct musical interests. Perform fuzzy clustering to identify 3 non-exclusive clusters\n\n<br> Estimated time: ~8 hours\n<br> Actual time: ~10 hours\n<br> Extensible: Yes\n<br> Next steps: Build a front-end application in JavaScript and move beyond demo playlists. Allow for retrieval of public playlists and selective inclusion of each playlist by contributing user.\n\n### Utility Functions\nLocated in util.py. Will be integrated into dedicated application beyond Jupyter concept.\n<ul>\n<li> get_users() </li>\n<li> get_playlists(spotify, users) </li>\n<li> get_track_features(spotify, playlist_information) </li>\n<li> plot_distributions(df, drop=None) </li>\n</ul>",
"_____no_output_____"
],
[
"## <br>Setting Up the Environment\nPackages loaded. Local environment variables were created for SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET, and SPOTIFY_USER_ID to hide these sensitive items. Subsequently imported using the os module.",
"_____no_output_____"
],
[
"### Import packages",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport util",
"_____no_output_____"
]
],
[
[
"### Loading Environment Variables",
"_____no_output_____"
]
],
[
[
"SPOTIFY_CLIENT_ID = os.environ['SPOTIFY_CLIENT_ID']\nSPOTIFY_CLIENT_SECRET = os.environ['SPOTIFY_CLIENT_SECRET']\nSPOTIFY_REDIRECT_URI = 'http://localhost:8888/callback/'\nusername = os.environ['SPOTIFY_USER_ID']",
"_____no_output_____"
]
],
[
[
" ## <br> Authorization",
"_____no_output_____"
],
[
"### Client Authorization",
"_____no_output_____"
]
],
[
[
"scope = 'playlist-modify-public'\nuser_token = spotipy.util.prompt_for_user_token(username, scope, SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET, SPOTIFY_REDIRECT_URI)\nspotify = spotipy.Spotify(auth=user_token)",
"_____no_output_____"
]
],
[
[
"## <br> Data Acquisition",
"_____no_output_____"
],
[
"### Get Playlist Tracks\nFor demonstration purposes, three pre-populated playlists are utilized, representing three independent users using the application to collaborate on a shared playlist. The example playlists have a general \"theme\" and the tracks were selected based only on the \"theme\" and my general familiarity with the music. For the purposes of this notebook, three playlists are loaded in below. However, if successfully authenticated through Spotify API, running the below code block will allow user to enter any number of Spotify usernames, view all public playlists, and opt to include the playlists in the analysis.\n\nPlaylist composition can be viewed directly on Spotify.\n<br>\n<a href =\"https://open.spotify.com/playlist/2V4jDbJJT7S575jdvrBuzV\">User 1 </a>\n<br>\n<a href =\"https://open.spotify.com/playlist/3gEikQyspYXdGwOdZwiFOj\">User 2 </a>\n<br>\n<a href =\"https://open.spotify.com/playlist/2I9p1xsjQGKljEviMQM5Lm\">User 3 </a>",
"_____no_output_____"
]
],
[
[
"users = util.get_users()\nplaylist_information = util.get_playlists(spotify=spotify, users=users)\nplaylist_information",
"Enter Spotify username or user ID: dandexler\nEnter another user? [y/n] n\nUsers selected: ['dandexler'] \n\n\nUser: dandexler\nPlaylist Information: \nname git init Playlist\ndescription spotiPylot example playlist\nid 4HQZWfYLja8sps8Gkk10EY\ntracks https://api.spotify.com/v1/playlists/4HQZWfYLj...\ntotal_tracks 100\ndtype: object\n\nInclude playlist? [y/n] [> next user, q=done] n\n\n\nPlaylist Information: \nname User3\ndescription \nid 2I9p1xsjQGKljEviMQM5Lm\ntracks https://api.spotify.com/v1/playlists/2I9p1xsjQ...\ntotal_tracks 50\ndtype: object\n\nInclude playlist? [y/n] [> next user, q=done] y\n\n\nPlaylist Information: \nname User2\ndescription \nid 3gEikQyspYXdGwOdZwiFOj\ntracks https://api.spotify.com/v1/playlists/3gEikQysp...\ntotal_tracks 50\ndtype: object\n\nInclude playlist? [y/n] [> next user, q=done] y\n\n\nPlaylist Information: \nname User1\ndescription \nid 2V4jDbJJT7S575jdvrBuzV\ntracks https://api.spotify.com/v1/playlists/2V4jDbJJT...\ntotal_tracks 50\ndtype: object\n\nInclude playlist? [y/n] [> next user, q=done] y\n\n\n"
]
],
[
[
"<center> <b> Table 01: Demo playlists and identifying information. <b> </center>",
"_____no_output_____"
],
[
"### Format Final DataFrame\nSalient audio features needed for the preliminary analysis are extracted and formatted. Information about each audio feature can be found at <a href = \"https://developer.spotify.com/documentation/web-api/reference/tracks/get-audio-features/\"> Spotify for Developers </a>.",
"_____no_output_____"
]
],
[
[
"final_df = util.get_track_features(spotify=spotify, playlist_information=playlist_information)\nfinal_df = final_df.drop(columns=['track_id', 'type', 'id', 'uri', 'track_href', 'analysis_url', 'time_signature'])\nfinal_df",
"_____no_output_____"
]
],
[
[
"<center> <b> Table 02: Combined Tracks and Audio Features.",
"_____no_output_____"
],
[
"## <br>Analysis",
"_____no_output_____"
],
[
"### Exploratory Data Analysis\nPlaylist name, artist name, track name, and time signature were dropped from the EDA table. Kernel density estimates were generated for each numeric variable.",
"_____no_output_____"
]
],
[
[
"util.plot_distributions(final_df, drop=['playlist_name', 'artist_name', 'track_name'])",
"_____no_output_____"
]
],
[
[
"<center> <b> Figure 01 - Figure 12: Distributions of Audio Features. </b> </center>",
"_____no_output_____"
],
[
"## <br> Clustering Using Fuzzy Sets\n\nThe aim of this project is to identify the joint similarities between all users (represented by each playlist). Some clustering techniques force all observations to be in one of the identified clusters. In this concept presentation, I would like to allow for a nuanced approach to musical similarity by allowing clusters based on fuzzy sets (Zadeh, 1965). Given more time, I would like to pursue a density-based approach or overlapping clustering techniques (Baadel et al. 2016). \n\nFuzzy clustering is a clustering technique that utilizes sets in which each element has degrees of membership in the others. Given that users may have overlapping musical interests, this clustering technique seems appropriate for experimentation. Fuzzy c-means is a computationally-intensive algorithm. Thus, I will standardize the input data and perform principal component analysis (PCA) to reduce dimensionality.\n",
"_____no_output_____"
],
[
"### Import Packages",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport skfuzzy as fuzz",
"_____no_output_____"
]
],
[
[
"### Principal Component Analysis \nProjecting the playlist features into fewer dimensions.",
"_____no_output_____"
],
[
"#### Standardizing the Features\nFeatures were standardized using scikit-learn's StandardScaler.",
"_____no_output_____"
]
],
[
[
"final_df_drop = final_df.drop(columns=['playlist_name', 'artist_name', 'track_name'])\nstdz_values = StandardScaler().fit_transform(final_df_drop)",
"_____no_output_____"
]
],
[
[
"#### PCA\nThe number of principal components was selected by roughly optimizing variance explained against complexity. Six principal components were selected to explain ~73% of the variance in the playlists. If I selected two dimensions, variance explained would be ~ 35%. Though I am giving up visualization capabilities, I believe this will lead to better playlist selections.",
"_____no_output_____"
]
],
[
[
"components = [x+1 for x in range(12)]\npca_obj = PCA(n_components=12)\npca_obj.fit_transform(stdz_values)\n\n# Scree plot and selected number of principal components\np1 = sns.pointplot(x=components, y=pca_obj.explained_variance_ratio_)\np1.set(xlabel='Number Components', ylabel='Variance Explained Ratio')\nplt.axvline(5, 0, 1.2, color='r')",
"_____no_output_____"
],
[
"pca_obj.explained_variance_ratio_",
"_____no_output_____"
]
],
[
[
"<b> Figure 13: Scree plot of variance explained against number of principal components.",
"_____no_output_____"
]
],
[
[
"# Cumulative sum of variance explained graph with selected components\np2 = sns.pointplot(x=components, y=np.cumsum(pca_obj.explained_variance_ratio_))\np2.set(xlabel='Number Components', ylabel='Cumulative Variance Explained')\nplt.axvline(5, 0, 1, color='r')",
"_____no_output_____"
]
],
[
[
"<b> Figure 14: Variance explained against number of prinicpal components.",
"_____no_output_____"
],
[
"#### PCA, continued.\nSix principal components were selected and fitted to the standardized data.",
"_____no_output_____"
]
],
[
[
"pca_obj = PCA(n_components=2)\npca_data = pca_obj.fit_transform(stdz_values)\npcaDF = pd.DataFrame(pca_data, columns = ['pc1', 'pc2', 'pc3', 'pc4', 'pc5', 'pc6'])\npcaDF",
"_____no_output_____"
]
],
[
[
"<b> Table 04: Three original playlist audio features projected along six principal components.",
"_____no_output_____"
],
[
"### Fuzzy C-Means Clustering\nThree fuzzy clusters are set. Max iterations will be 1000 unless error stopping criterion=0.005. Seed set to 1234 for reproducibility. Visualization of clusters are not possible for 6-dimensional PCA components.\n\nContext for each parameter can be found at <a href=\"https://pythonhosted.org/scikit-fuzzy/api/skfuzzy.html\"> scikit-fuzzy </a>.",
"_____no_output_____"
]
],
[
[
"cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(data=pcaDF, c=3, m=2, error=0.005, maxiter=1000, init=None, seed=1234)\n# Returns array rows x clusters, which are the centers of each feature for each cluster.",
"_____no_output_____"
],
[
"u0",
"_____no_output_____"
]
],
[
[
"### Creating the Playlist",
"_____no_output_____"
],
[
"#### Using the Fuzzy C-Means clusters ",
"_____no_output_____"
],
[
"To populate the playlist, I am looking for 100 songs that have the highest membership in all three fuzzy c-means clusters. This data was obtained from a <a href=\"https://www.kaggle.com/tomigelo/spotify-audio-features\">Kaggle dataset </a> and contains 130,000 tracks and their corresponding audio features. This data set was updated April 2019.",
"_____no_output_____"
]
],
[
[
"new_tracks = pd.read_csv(\"C:/Users/dande/Documents/Projects/spotiPylot/data/SpotifyAudioFeaturesApril2019.csv\")\nnew_tracks",
"_____no_output_____"
]
],
[
[
"<center><b> Table 05: Audio features of 130,000 tracks on Spotify as of April 2019.",
"_____no_output_____"
],
[
"##### PCA on the New Data\nBefore clustering, the new data are projected to six principal components to mirror the playlist data.",
"_____no_output_____"
]
],
[
[
"new_features = new_tracks.loc[:,final_df_drop.columns]\nnew_stdz_values = StandardScaler().fit_transform(new_features)\nnew_pca_obj = PCA(n_components=6)\nnew_pca_data = new_pca_obj.fit_transform(new_stdz_values)\nnew_pcaDF = pd.DataFrame(new_pca_data, columns = ['pc1', 'pc2', 'pc3', 'pc4', 'pc5', 'pc6'])\nnew_pcaDF",
"_____no_output_____"
]
],
[
[
"<b> Table 06: Six principal components of the new data.",
"_____no_output_____"
],
[
"#### Determining fuzzy c-means cluster membership\nRandomly sample 150 songs from the PCA dataframe, use the fuzzy c-means to predict fuzzy partition coefficient. After n random draws, the sample with the highest FPC is loaded into the playlist. Had to remove seed to get different results. May get better results by increasing range at the expense of computation time. *See considerations",
"_____no_output_____"
]
],
[
[
"fpc = 0\nfor i in range(1000):\n new_pca_sample = new_pcaDF.sample(100)\n new_model = fuzz.cluster.cmeans_predict(new_pca_sample, cntr, 2, error=0.005, maxiter=1000)\n if new_model[-1] > fpc:\n new_df = new_pca_sample.copy() # If the FPC is greater than previous, saves FPC and the indices of the dataframe\n fpc = new_model[-1] # Updates FPC",
"_____no_output_____"
]
],
[
[
"#### Retrieving Selected Tracks\nRetrieves by index.",
"_____no_output_____"
]
],
[
[
"final_full_tracks = new_tracks.iloc[new_df.index,:]\nfinal_full_tracks",
"_____no_output_____"
]
],
[
[
"<center> <b> Table 07: Tracks selected by the clustering algorithm. </b> </center>",
"_____no_output_____"
],
[
"#### Creating the Playlist and Populating with Clustered Tracks\n<B>WARNING: THIS WILL CREATE A PLAYLIST ON YOUR PROFILE FOR EACH TIME YOU RUN IT. RUN ONCE.",
"_____no_output_____"
]
],
[
[
"util.create_playlist(tracks=final_full_tracks, spotify=spotify, username=username)\n",
"_____no_output_____"
]
],
[
[
"## Next Steps\n<ul style=\"list-style-type:disc;\">\n <li>Improve algorithm</li>\n <ul>\n <li>Multi-Cluster Overlapping K-means Extension (MCOKE)\n </ul>\n <li>Engineer new features for comparison </li>\n <li>Evaluate edge cases</li>\n <li>Create user interface/applet in JavaScript</li>\n <li>Improve song cataloguing for faster matching\n <li>Refactor some code, add list comprehensions, refactor the Seaborn graphics\n <li>Find a better way to find new songs, other than data set\n <li>Stress testing with playlists that are not \"themed\"\n</ul>\n\n### Considerations\n\n#### Handling Multiple Artists\nI do not know if the API can return multiple artists for a single track. There is an indexing feature within the JSON response that must be investigated. The code currently handles only one artist response per track.\n#### Handling Multiple Users\nResults may vary, depending on the number of users. Best results are likely obtained with 2-3 users. More work will be needed to scale.\n#### More Time Needed for Clustering Parameters and Research\nThe qualitative nature of music enjoyment contributes to the imprecision of comparing clustering techniques. In the future, there is an opportunity for \n#### Can Only Draw Random Sample of Equal Size to Input Tracks for Prediction\nWill need to explore the quirks of this package more in depth.\n#### Data Set Used to Match Tracks May Be Flawed\nResults may vary. More testing needed.",
"_____no_output_____"
],
[
"## Citations\nAggarwal, Charu C., Alexander Hinneburg, and Daniel A. Keim. \"On the surprising behavior of distance metrics in high dimensional space.\" International conference on database theory. Springer, Berlin, Heidelberg, (2001).\n\nBaadel, S., Thabtah, F., and Lu, J. \"Overlapping clustering: A review,\" 2016 SAI Computing Conference, London, (2016): 233-237.\n\nBezdek, J. \"Pattern Recognition with Fuzzy Objective Function Algoritms\", Plenum Press, New York, (1981).\n\nDunn, J. \"A Fuzzy Relative of the ISODATA Process and Its Use in Detecting Compact Well-Separated Clusters\", Journal of Cybernetics 3: (1973): 32-57.\n\n\nZadeh, L.A. \"Fuzzy sets.\" Information and control 8.3 (1965): 338-353.\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e79d31a3f522b856ecdd8bf88161879edeeb275f | 6,949 | ipynb | Jupyter Notebook | .ipynb_checkpoints/Summary of Trending Topics for the Day-checkpoint.ipynb | Coolbreeze151/RPA-with-TagUI | 118ae4542f540d155324403b35a22988cd196906 | [
"MIT"
] | 1 | 2020-09-05T12:49:14.000Z | 2020-09-05T12:49:14.000Z | Summary of Trending Topics for the Day.ipynb | Coolbreeze151/RPA-with-TagUI | 118ae4542f540d155324403b35a22988cd196906 | [
"MIT"
] | null | null | null | Summary of Trending Topics for the Day.ipynb | Coolbreeze151/RPA-with-TagUI | 118ae4542f540d155324403b35a22988cd196906 | [
"MIT"
] | 1 | 2021-02-09T19:38:07.000Z | 2021-02-09T19:38:07.000Z | 34.40099 | 245 | 0.545978 | [
[
[
"## Summary of Trending Topics for the Day\n\nIn this script we will pull data on what's trending for the day in Singapore. We will use [Google Trends](https://trends.google.com/trends/?geo=SG) as the primary platform to get the latest news/information on what is trending for the day.",
"_____no_output_____"
]
],
[
[
"import tagui as t",
"_____no_output_____"
],
[
"#Visiting the URL to get the daily trends for today\nt.init(visual_automation = True, chrome_browser = True)\nt.url('https://trends.google.com/trends/trendingsearches/daily?geo=SG')\nheader1 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[1]/feed-item/ng-include/div/div/div[1]/div[2]/div[1]')\nheader2 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[2]/feed-item/ng-include/div/div/div[1]/div[2]/div[1]')\nheader3 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[3]/feed-item/ng-include/div/div/div[1]/div[2]/div[1]')\nheader4 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[4]/feed-item/ng-include/div/div/div[1]/div[2]/div[1]')\nheader5 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[5]/feed-item/ng-include/div/div/div[1]/div[2]/div[1]')\nsearch1 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[1]/feed-item/ng-include/div/div/div[1]/div[3]/ng-include/div')\nsearch2 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[2]/feed-item/ng-include/div/div/div[1]/div[3]/ng-include/div')\nsearch3 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[3]/feed-item/ng-include/div/div/div[1]/div[3]/ng-include/div')\nsearch4 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[4]/feed-item/ng-include/div/div/div[1]/div[3]/ng-include/div')\nsearch5 = t.read('/html/body/div[2]/div[2]/div/div[2]/div/div[1]/ng-include/div/div/div/div/md-list[5]/feed-item/ng-include/div/div/div[1]/div[3]/ng-include/div')\n\nt.snap('page','trend.png')\nt.close()\n## We will do a simple score calculator here\n# print(score)\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = 
'\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n \n \n#We will now remove all unnecessary whitespaces\nheader1=\"\".join(header1.split())\nheader2=\"\".join(header2.split())\nheader3=\"\".join(header3.split())\nheader4=\"\".join(header4.split())\nheader5=\"\".join(header5.split())\n\nsearch1=\"\".join(search1.split())\nsearch2=\"\".join(search2.split())\nsearch3=\"\".join(search3.split())\nsearch4=\"\".join(search4.split())\nsearch5=\"\".join(search5.split())\n\nheader1 = header1.replace(\"share\",\"\")\nheader2= header2.replace(\"share\",\"\")\nheader3 = header3.replace(\"share\",\"\")\nheader4 = header4.replace(\"share\",\"\")\nheader5 = header5.replace(\"share\",\"\")\n\nprint(color.UNDERLINE+color.BOLD+'Top 5 Search Trends Today'+color.END)\nprint(\"1) \"+header1+\" (\"+search1+\")\")\nprint(\"2) \"+header2+\" (\"+search2+\")\")\nprint(\"3) \"+header3+\" (\"+search3+\")\")\nprint(\"4) \"+header4+\" (\"+search4+\")\")\nprint(\"5) \"+header5+\" (\"+search5+\")\")\n",
"\u001b[4m\u001b[1mTop 5 Search Trends Today\u001b[0m\n1) CebuPacific (5K+searches)\n2) Russia (5K+searches)\n3) JuventusvsUdinese (5K+searches)\n4) Dolittle (2K+searches)\n5) Lakers (2K+searches)\n"
],
[
"from IPython.display import Image\nfrom IPython.core.display import HTML \nImage(url= \"trend.png\")",
"_____no_output_____"
],
[
"from IPython.display import HTML\n\nHTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to Hide/Show the raw code.\"></form>''')",
"_____no_output_____"
]
],
[
[
"Access the top search trends [here](https://trends.google.com/trends/trendingsearches/daily?geo=SG)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e79d3bd972999377168bba7ab98f52031c6e2c7a | 930,272 | ipynb | Jupyter Notebook | Simulate_Scratching.ipynb | gened1080/record-scratching | 31b82d4e32b3add183b3c8204c237e6f52ccf0e1 | [
"MIT"
] | null | null | null | Simulate_Scratching.ipynb | gened1080/record-scratching | 31b82d4e32b3add183b3c8204c237e6f52ccf0e1 | [
"MIT"
] | null | null | null | Simulate_Scratching.ipynb | gened1080/record-scratching | 31b82d4e32b3add183b3c8204c237e6f52ccf0e1 | [
"MIT"
] | null | null | null | 865.369302 | 369,966 | 0.90121 | [
[
[
"# Record Scratching Simulator\nThis notebook allows you to read an audio file and modify the signal by adding a scratching effect. The scratching effect is simulated by taking a short audio signal, speeding it up in the forward followed by the reverse direction, and inserting the modified signal into the original audio. The idea is to mimic the movement of a record in a \"baby scratch\". The user can enter the amount of speedup, the timestamp in the original track where they want to insert the scratching effect, and the number of scratches. ",
"_____no_output_____"
]
],
[
[
"%%bash\n!(stat -t /usr/local/lib/*/dist-packages/google/colab > /dev/null 2>&1) && exit \nrm -rf record-scratching/\ngit clone https://github.com/gened1080/record-scratching.git\npip install pydub\nsudo apt-get install libasound-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg",
"Requirement already satisfied: pydub in /usr/local/lib/python3.7/dist-packages (0.25.1)\nReading package lists...\nBuilding dependency tree...\nReading state information...\nlibportaudio2 is already the newest version (19.6.0-1).\nlibportaudiocpp0 is already the newest version (19.6.0-1).\nportaudio19-dev is already the newest version (19.6.0-1).\nlibasound2-dev is already the newest version (1.1.3-5ubuntu0.6).\nffmpeg is already the newest version (7:3.4.8-0ubuntu0.2).\n0 upgraded, 0 newly installed, 0 to remove and 37 not upgraded.\n"
],
[
"# Import relevant packages\nimport sys\nsys.path.append('/content/record-scratching/')\nimport AudioS as asc\nfrom bokeh.io import output_notebook\nimport IPython.display as ipd\noutput_notebook()",
"_____no_output_____"
]
],
[
[
"## Read audio file and plot signal\n\nWe start by creating an object of the `AudioS` class, which also calls the method for reading an audio file. The files for reading need to be placed in the samples directory. After the file is read, the audio signal is extracted and plotted below.",
"_____no_output_____"
]
],
[
[
"# create object of AudioS class and read file\nscr = asc.AudioS()\n\n# plot the audio signal in the file\nscr.plot_signal(scr.original_sound, 'original')",
"----------------------\nEnter the audio filename you want to read including the extension: ahh.wav\n----------------------\n"
]
],
[
[
"## Play Audio\nThe original audio track can be played below",
"_____no_output_____"
]
],
[
[
"# play audiotrack\nipd.Audio(scr.original_signal, rate=scr.framerate)",
"_____no_output_____"
]
],
[
[
"## Scratching Effect\n\nThe `scratch_audio` method implements the scratching effect simulator. The user is asked to specify the amount of speedup during a scratch. We assume that the scratch is done over a quarter rotation of the record. Then using the user-entered speedup, we determine the duration of the scratch. The user is also asked to enter the timestamp (in milliseconds) where they want to insert the scratch effect. Using this timestamp and the scratch duration, we clip the appropriate portion of the original audio signal. Finally, the user is asked to enter the number of scratches to insert. The clipped audio is spedup in the forward direction and in the reverse direction. The forward and reserve audio signals are appended and repeated depending on the number of scratches entered by the user. Finally, the scracthed audio is inserted back into the original signal at the timestamp specifed earlier. \n\nAn FFT of the audio clip that is modified and the original is calculated and the results plotted together for comparison. \n\nThe user is also asked whether they would like to save the modified audio signal, i.e., signal with the scratched effect, to a file. A `.wav` file is exported from the modified audiosegment and saved in the samples directory. ",
"_____no_output_____"
]
],
[
[
"# Runs the scratching effect simulator\nscr.scratch_audio(scr.original_sound)",
"----------------------\nDo you want to manually enter scratching parameters (y/n): n\n----------------------\n"
]
],
[
[
"## Play Audio with Scratch Effect\n\nThe modified audio with the scratch effect can be played below.",
"_____no_output_____"
]
],
[
[
"# Play audio\nipd.Audio(scr.scratched_signal, rate=scr.framerate)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79d3f624aaff2eb0ee4ef8efa8b2bfbc447c5b7 | 9,177 | ipynb | Jupyter Notebook | notebooks/chapter03_notebook/06_widgets.ipynb | khanparwaz/PythonProjects | 3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d | [
"BSD-2-Clause"
] | 820 | 2015-01-01T18:15:54.000Z | 2022-03-06T16:15:07.000Z | notebooks/chapter03_notebook/06_widgets.ipynb | khanparwaz/PythonProjects | 3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d | [
"BSD-2-Clause"
] | 31 | 2015-02-25T22:08:09.000Z | 2018-09-28T08:41:38.000Z | notebooks/chapter03_notebook/06_widgets.ipynb | khanparwaz/PythonProjects | 3f5c7bf7780b235ad45f8d3f7dd5b05d6b382a2d | [
"BSD-2-Clause"
] | 483 | 2015-01-02T13:53:11.000Z | 2022-03-18T21:05:16.000Z | 30.89899 | 461 | 0.540809 | [
[
[
"> This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python.\n",
"_____no_output_____"
],
[
"# 3.6. Creating a custom Javascript widget in the notebook: a spreadsheet editor for Pandas",
"_____no_output_____"
],
[
"You need IPython 2.0+ for this recipe. Besides, you need the [Handsontable](http://handsontable.com) Javascript library. Below are the instructions to load this Javascript library in the IPython notebook.\n\n1. Go [here](https://github.com/warpech/jquery-handsontable/tree/master/dist).\n2. Download `jquery.handsontable.full.css` and `jquery.handsontable.full.js`, and put these two files in `~\\.ipython\\profile_default\\static\\custom\\`.\n3. In this folder, add the following line in `custom.js`:\n`require(['/static/custom/jquery.handsontable.full.js']);`\n4. In this folder, add the following line in `custom.css`:\n`@import \"/static/custom/jquery.handsontable.full.css\"`",
"_____no_output_____"
],
[
"Now, refresh the notebook!",
"_____no_output_____"
],
[
"1. Let's import a few functions and classes.",
"_____no_output_____"
]
],
[
[
"from IPython.html import widgets\nfrom IPython.display import display\nfrom IPython.utils.traitlets import Unicode",
"_____no_output_____"
]
],
[
[
"2. We create a new widget. The `value` trait will contain the JSON representation of the entire table. This trait will be synchronized between Python and Javascript thanks to IPython 2.0's widget machinery.",
"_____no_output_____"
]
],
[
[
"class HandsonTableWidget(widgets.DOMWidget):\n _view_name = Unicode('HandsonTableView', sync=True)\n value = Unicode(sync=True)",
"_____no_output_____"
]
],
[
[
"3. Now we write the Javascript code for the widget. The three important functions that are responsible for the synchronization are:\n\n * `render` for the widget initialization\n * `update` for Python to Javascript update\n * `handle_table_change` for Javascript to Python update",
"_____no_output_____"
]
],
[
[
"%%javascript\nvar table_id = 0;\nrequire([\"widgets/js/widget\"], function(WidgetManager){ \n // Define the HandsonTableView\n var HandsonTableView = IPython.DOMWidgetView.extend({\n \n render: function(){\n // Initialization: creation of the HTML elements\n // for our widget.\n \n // Add a <div> in the widget area.\n this.$table = $('<div />')\n .attr('id', 'table_' + (table_id++))\n .appendTo(this.$el);\n // Create the Handsontable table.\n this.$table.handsontable({\n });\n \n },\n \n update: function() {\n // Python --> Javascript update.\n \n // Get the model's JSON string, and parse it.\n var data = $.parseJSON(this.model.get('value'));\n // Give it to the Handsontable widget.\n this.$table.handsontable({data: data});\n \n // Don't touch this...\n return HandsonTableView.__super__.update.apply(this);\n },\n \n // Tell Backbone to listen to the change event \n // of input controls.\n events: {\"change\": \"handle_table_change\"},\n \n handle_table_change: function(event) {\n // Javascript --> Python update.\n \n // Get the table instance.\n var ht = this.$table.handsontable('getInstance');\n // Get the data, and serialize it in JSON.\n var json = JSON.stringify(ht.getData());\n // Update the model with the JSON string.\n this.model.set('value', json);\n \n // Don't touch this...\n this.touch();\n },\n });\n \n // Register the HandsonTableView with the widget manager.\n WidgetManager.register_widget_view(\n 'HandsonTableView', HandsonTableView);\n});",
"_____no_output_____"
]
],
[
[
"4. Now, we have a synchronized table widget that we can already use. But we'd like to integrate it with Pandas. To do this, we create a light wrapper around a `DataFrame` instance. We create two callback functions for synchronizing the Pandas object with the IPython widget. Changes in the GUI will automatically trigger a change in the `DataFrame`, but the converse is not true. We'll need to re-display the widget if we change the `DataFrame` in Python.",
"_____no_output_____"
]
],
[
[
"from io import StringIO # Python 2: from StringIO import StringIO\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"class HandsonDataFrame(object):\n def __init__(self, df):\n self._df = df\n self._widget = HandsonTableWidget()\n self._widget.on_trait_change(self._on_data_changed, \n 'value')\n self._widget.on_displayed(self._on_displayed)\n \n def _on_displayed(self, e):\n # DataFrame ==> Widget (upon initialization only)\n json = self._df.to_json(orient='values')\n self._widget.value = json\n \n def _on_data_changed(self, e, val):\n # Widget ==> DataFrame (called every time the user\n # changes a value in the graphical widget)\n buf = StringIO(val)\n self._df = pd.read_json(buf, orient='values')\n \n def to_dataframe(self):\n return self._df\n \n def show(self):\n display(self._widget)",
"_____no_output_____"
]
],
[
[
"5. Now, let's test all that! We first create a random `DataFrame`.",
"_____no_output_____"
]
],
[
[
"data = np.random.randint(size=(3, 5), low=100, high=900)\ndf = pd.DataFrame(data)\ndf",
"_____no_output_____"
]
],
[
[
"6. We wrap it in a `HandsonDataFrame` and show it.",
"_____no_output_____"
]
],
[
[
"ht = HandsonDataFrame(df)\nht.show()",
"_____no_output_____"
]
],
[
[
"7. We can now *change* the values interactively, and they will be changed in Python accordingly.",
"_____no_output_____"
]
],
[
[
"ht.to_dataframe()",
"_____no_output_____"
]
],
[
[
"> You'll find all the explanations, figures, references, and much more in the book (to be released later this summer).\n\n> [IPython Cookbook](http://ipython-books.github.io/), by [Cyrille Rossant](http://cyrille.rossant.net), Packt Publishing, 2014 (500 pages).",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e79d4d341c043069cd4a512a09567ec179e8577c | 7,264 | ipynb | Jupyter Notebook | examples/3_WorkflowRegistryUsers.ipynb | kikkomep/life_monitor | 39a676a4661f8b4bee112d189a49be3e1c5acfb0 | [
"MIT"
] | 5 | 2020-07-20T07:52:50.000Z | 2022-01-12T09:23:11.000Z | examples/3_WorkflowRegistryUsers.ipynb | kikkomep/life_monitor | 39a676a4661f8b4bee112d189a49be3e1c5acfb0 | [
"MIT"
] | 62 | 2020-05-25T09:22:14.000Z | 2022-03-25T16:07:32.000Z | examples/3_WorkflowRegistryUsers.ipynb | crs4/life_monitor | fbfdf0d755b329dd46acaef3950a521d0b3d491f | [
"MIT"
] | 5 | 2020-03-13T12:08:54.000Z | 2021-01-26T17:30:36.000Z | 30.141079 | 278 | 0.545292 | [
[
[
"# Query Registry Users",
"_____no_output_____"
],
[
"Before executing this notebook, complete [step 2](./2_WorkflowRegistrySetup.ipynb).",
"_____no_output_____"
]
],
[
[
"lifemonitor_root = \"/home/simleo/git/life_monitor\"\n%cd -q {lifemonitor_root}",
"_____no_output_____"
],
[
"import requests",
"_____no_output_____"
],
[
"lm_base_url = \"https://localhost:8443\"\nlm_token_url = f\"{lm_base_url}/oauth2/token\"",
"_____no_output_____"
],
[
"# Get info on the \"seek\" registry\n!docker-compose exec lm /bin/bash -c \"flask registry show seek\"",
"\r\n\r\n****************************************************************************************************\r\nWorkflow Registry 'seek' (uuid: 8ee6f93b-ac01-4183-b90e-14fec7ac9d22, type: seek_registry) registered!\r\n****************************************************************************************************\r\n\r\n\r\nOAuth2 settings to connect to LifeMonitor:\r\n----------------------------------------------------------------------------------------------------\r\nREGISTRY NAME: seek\r\nREGISTRY API URL: https://seek:3000\r\nREGISTRY CLIENT ID: fOeRRL3Z7tI1i5iHRczz4B0X\r\nREGISTRY CLIENT SECRET: WA1rPDSudxDcQsZpqZosQXGwMx57hghbdMCaE7FBe9OmQLqE\r\nREGISTRY CLIENT ALLOWED SCOPES: registry.info registry.user registry.workflow.read registry.workflow.write registry.user.workflow.read registry.user.workflow.write workflow.read workflow.write testingService.read testingService.write user.profile user.workflow.read\r\nREGISTRY CLIENT ALLOWED FLOWS: ['client_credentials', 'authorization_code', 'refresh_token']\r\nREGISTRY CLIENT REDIRECT URIs: ['https://seek:3000']\r\nREGISTRY CLIENT AUTH METHOD: client_secret_post\r\nAUTHORIZE URL: <LIFE_MONITOR_BASE_URL>/oauth2/authorize/seek\r\nACCESS TOKEN URL: <LIFE_MONITOR_BASE_URL>/oauth2/token\r\nCALLBACK URL: <LIFE_MONITOR_BASE_URL>/oauth2/authorized/seek[?next=<URL>]\r\n\r\n"
],
[
"# Copy registry credentials from the above dump\nCLIENT_ID = \"fOeRRL3Z7tI1i5iHRczz4B0X\"\nCLIENT_SECRET = \"WA1rPDSudxDcQsZpqZosQXGwMx57hghbdMCaE7FBe9OmQLqE\"",
"_____no_output_____"
],
[
"# Enter the following URL in your browser\nf\"{lm_base_url}/oauth2/login/seek\"\n# Then log in as username: user1 and password: workflowhub",
"_____no_output_____"
],
[
"# Get an authorization token from LifeMonitor\ns = requests.session()\ns.verify = False\ns.headers.update({})\ntoken_response = s.post(\n lm_token_url, \n data={\n \"client_id\": CLIENT_ID,\n \"client_secret\": CLIENT_SECRET,\n \"grant_type\": \"client_credentials\",\n \"scope\": \"registry.user\"\n }, allow_redirects=True, verify=False)\nassert token_response.status_code == 200\ntoken = token_response.json()\ntoken",
"/usr/lib/python3/dist-packages/urllib3/connectionpool.py:860: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\n InsecureRequestWarning)\n"
],
[
"# Update headers with the OAuth2 token\ns.headers.update({'Authorization': f\"Bearer {token['access_token']}\"})",
"_____no_output_____"
],
[
"# Get registry users\nresponse = s.get(f\"{lm_base_url}/registries/current/users\")\nassert response.status_code == 200, f\"Unexpected error {response.status_code}: {response.content}\"\nregistry_users = response.json()\nregistry_users",
"/usr/lib/python3/dist-packages/urllib3/connectionpool.py:860: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings\n InsecureRequestWarning)\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79d52e5f310f39d1f25e7c2b857de1c8717021d | 7,850 | ipynb | Jupyter Notebook | notebooks/_imports.ipynb | pmc-tables/pmc-tables | 3f4adbfff353b83a5dc660010f058192948a8833 | [
"MIT"
] | 1 | 2020-07-29T10:07:33.000Z | 2020-07-29T10:07:33.000Z | notebooks/_imports.ipynb | pmc-tables/pmc-tables | 3f4adbfff353b83a5dc660010f058192948a8833 | [
"MIT"
] | null | null | null | notebooks/_imports.ipynb | pmc-tables/pmc-tables | 3f4adbfff353b83a5dc660010f058192948a8833 | [
"MIT"
] | null | null | null | 23.787879 | 105 | 0.541274 | [
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"%load_ext autoreload\n%autoreload 2",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"# Standard library\nimport concurrent.futures\nimport csv\nimport datetime\nimport functools\nimport glob\nimport gzip\nimport importlib\nimport itertools\nimport json\nimport logging\nimport os\nimport os.path as op\nimport pathlib\nimport pickle\nimport re\nimport shlex\nimport shutil\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport urllib\n\nfrom collections import Counter, defaultdict, OrderedDict, namedtuple\nfrom pathlib import Path\nfrom pprint import pprint\nfrom textwrap import dedent\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport scipy as sp\nimport seaborn as sns\nimport sqlalchemy as sa\nimport tqdm\nimport wurlitzer\nimport yaml\n\nfrom IPython.display import display, HTML, Math, IFrame",
"_____no_output_____"
],
[
"# import kmbio.PDB\n\n# import kmtools\n# import kmtools.df_tools\n# import kmtools.sequence_tools\n# import kmtools.structure_tools\n\n# import elaspic\n# import elaspic.elaspic_predictor\n# import elaspic.structure_tools\n\n# import jobsubmitter\n\n# import odbo\n\n# print2 = kmtools.df_tools.print2",
"_____no_output_____"
],
[
"# Create logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# Create STDERR handler\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setLevel(logging.DEBUG)\n\n# Create formatter and add it to the handler\nformatter = logging.Formatter('%(message)s')\nhandler.setFormatter(formatter)\n\n# Set STDERR handler as the only handler \nlogger.handlers = [handler]",
"_____no_output_____"
],
[
"class LoggingContext(object):\n def __init__(self, logger, level=None, handler=None, close=True):\n self.logger = logger\n self.level = level\n self.handler = handler\n self.close = close\n\n def __enter__(self):\n if self.level is not None:\n self.old_level = self.logger.level\n self.logger.setLevel(self.level)\n if self.handler:\n self.logger.addHandler(self.handler)\n\n def __exit__(self, et, ev, tb):\n if self.level is not None:\n self.logger.setLevel(self.old_level)\n if self.handler:\n self.logger.removeHandler(self.handler)\n if self.handler and self.close:\n self.handler.close()\n # implicit return of None => don't swallow exceptions",
"_____no_output_____"
],
[
"try:\n import local\nexcept ImportError:\n pass",
"_____no_output_____"
],
[
"try:\n fin = open('../.gitlab-ci.yml')\nexcept FileNotFoundError:\n CONFIG = dict()\nelse:\n CONFIG = yaml.load(fin)['variables']\n for key, value in CONFIG.items():\n logger.info(\"Setting the %s environment variable.\", key)\n os.environ[key] = str(value) # environment variables can't be integers or anything else\nfinally:\n fin.close()",
"Setting the PYTHON_VERSION environment variable.\nSetting the SPARK_MASTER environment variable.\nSetting the SPARK_ARGS environment variable.\nSetting the DB_TYPE environment variable.\nSetting the DB_PORT environment variable.\n"
],
[
"os.environ['DB_SCHEMA'] = op.basename(op.dirname(os.getcwd()))\nos.environ['DB_PORT'] = str(CONFIG['DB_PORT'])\nos.environ['DB_TEMPDIR'] = op.join(tempfile.gettempdir(), op.basename(op.dirname(os.getcwd())))",
"_____no_output_____"
],
[
"# Display options\npd.set_option('display.max_rows', 1000)\npd.set_option('display.max_columns', 1000)\npd.set_option('display.width', 1000)\npd.set_option(\"display.max_colwidth\", 120)\npd.set_option('mode.chained_assignment', None)\npd.set_option('io.hdf.default_format','table')\n\nsns.set_style('whitegrid')\nsns.set_context('notebook', font_scale=2)",
"_____no_output_____"
],
[
"print(datetime.datetime.now())",
"2017-11-21 18:30:41.795555\n"
],
[
"_IMPORTS_LOADED = True",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79d583e502d85908069edacb7aee144ba91fafb | 638,102 | ipynb | Jupyter Notebook | notebooks/heatmap.ipynb | hailpam/data-crunching | f0c1e60f5203ab786582cb8fb76126d4dcec9a8a | [
"MIT"
] | null | null | null | notebooks/heatmap.ipynb | hailpam/data-crunching | f0c1e60f5203ab786582cb8fb76126d4dcec9a8a | [
"MIT"
] | null | null | null | notebooks/heatmap.ipynb | hailpam/data-crunching | f0c1e60f5203ab786582cb8fb76126d4dcec9a8a | [
"MIT"
] | null | null | null | 4,691.926471 | 600,642 | 0.622402 | [
[
[
"import sqlite3\nimport numpy as np\nimport seaborn as sb\nimport matplotlib.pyplot as plt\n\nfrom pandas import DataFrame\n\nQUERY = \"\"\"\nSELECT o_time AS \"time\", i_qty AS \"qty\"\nFROM orders\nGROUP BY o_order_id, o_time\nORDER BY o_order_id\n\"\"\"",
"_____no_output_____"
],
[
"try:\n conn = sqlite3.connect('../db/orders_en_GB.db')\n res = conn.execute(QUERY)\n\n df = DataFrame(res.fetchall())\n print(df)\n\n df['hour'] = df[0].apply(lambda x: int(x.split(':')[0]))\n df['minute'] = df[0].apply(lambda x: int(x.split(':')[1]))\n df['value'] = df[1].apply(lambda x: float(x))\n del df[0]\n del df[1]\n print(df)\nfinally:\n if conn:\n conn.close()",
" 0 1\n0 00:00:00+02:00 5.000000\n1 09:10:29+02:00 3.000000\n2 10:10:45+02:00 1.000000\n3 10:24:39+02:00 2.000000\n4 13:25:31+02:00 4.000000\n.. ... ...\n345 04:37:38+02:00 4.000000\n346 05:06:00+02:00 1.000000\n347 07:15:46+02:00 2.000000\n348 07:30:45+02:00 1.000000\n349 09:40:33+02:00 1.000000\n\n[350 rows x 2 columns]\n hour minute value\n0 0 0 5.0\n1 9 10 3.0\n2 10 10 1.0\n3 10 24 2.0\n4 13 25 4.0\n.. ... ... ...\n345 4 37 4.0\n346 5 6 1.0\n347 7 15 2.0\n348 7 30 1.0\n349 9 40 1.0\n\n[350 rows x 3 columns]\n"
],
[
"pt = df.pivot_table(index='minute', columns='hour', values='value', aggfunc='sum').fillna(0)\nprint(pt)",
"hour 0 1 2 4 5 6 7 8 9 10 ... 14 15 \\\nminute ... \n0 12.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 10.0 \n1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 4.0 ... 0.0 0.0 \n3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.0 ... 0.0 0.0 \n4 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 4.0 ... 1.0 0.0 \n5 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 3.0 0.0 \n6 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 4.0 2.0 \n7 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 1.0 0.0 \n8 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 11.0 0.0 \n9 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n10 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.0 3.0 ... 4.0 3.0 \n11 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 6.0 0.0 \n12 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 1.0 3.0 \n13 5.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 ... 0.0 0.0 \n14 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 2.0 \n15 0.0 0.0 0.0 0.0 0.0 0.0 2.0 1.0 0.0 0.0 ... 0.0 3.0 \n16 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 4.0 0.0 \n17 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n18 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 8.0 0.0 \n19 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.0 0.0 ... 0.0 0.0 \n20 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 1.0 0.0 \n21 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 4.0 0.0 \n22 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 12.0 ... 15.0 0.0 \n23 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n24 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 ... 1.0 2.0 \n25 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 ... 0.0 1.0 \n26 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 2.0 \n27 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 ... 0.0 0.0 \n28 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 1.0 0.0 \n29 8.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 4.0 \n30 0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 ... 0.0 0.0 \n31 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 2.0 6.0 \n32 0.0 2.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 6.0 ... 1.0 1.0 \n33 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 
0.0 0.0 \n34 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n35 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 1.0 0.0 ... 0.0 1.0 \n36 2.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 3.0 \n37 0.0 0.0 0.0 4.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 2.0 0.0 \n38 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 ... 0.0 1.0 \n39 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 0.0 4.0 \n40 0.0 0.0 1.0 0.0 0.0 0.0 0.0 6.0 1.0 0.0 ... 0.0 0.0 \n41 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 2.0 0.0 ... 0.0 0.0 \n42 0.0 0.0 6.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n43 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n44 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n45 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 ... 2.0 4.0 \n46 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n47 0.0 0.0 2.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 4.0 0.0 \n48 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n49 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 1.0 3.0 \n50 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 ... 0.0 0.0 \n51 0.0 0.0 0.0 0.0 0.0 0.0 2.0 1.0 0.0 1.0 ... 0.0 2.0 \n52 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 2.0 \n53 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 4.0 ... 2.0 0.0 \n54 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.0 ... 0.0 2.0 \n55 0.0 0.0 0.0 0.0 0.0 0.0 0.0 12.0 2.0 0.0 ... 1.0 0.0 \n56 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 \n57 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 8.0 3.0 ... 2.0 1.0 \n58 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.0 0.0 ... 0.0 1.0 \n59 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 1.0 ... 
0.0 0.0 \n\nhour 16 17 18 19 20 21 22 23 \nminute \n0 3.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n1 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0 \n2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n3 0.0 1.0 0.0 1.0 0.0 1.0 1.0 0.0 \n4 0.0 0.0 2.0 0.0 1.0 0.0 0.0 0.0 \n5 6.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n6 0.0 6.0 5.0 2.0 0.0 0.0 0.0 0.0 \n7 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 \n8 0.0 2.0 2.0 0.0 0.0 0.0 1.0 0.0 \n9 4.0 3.0 0.0 0.0 0.0 1.0 0.0 0.0 \n10 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 \n11 0.0 0.0 2.0 0.0 0.0 0.0 0.0 0.0 \n12 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 \n13 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n14 3.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 \n15 5.0 1.0 1.0 0.0 0.0 0.0 1.0 0.0 \n16 0.0 3.0 0.0 0.0 0.0 1.0 1.0 0.0 \n17 0.0 0.0 2.0 2.0 2.0 0.0 0.0 0.0 \n18 3.0 2.0 0.0 0.0 0.0 0.0 0.0 0.0 \n19 2.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n20 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 \n21 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n22 3.0 0.0 0.0 1.0 2.0 5.0 0.0 0.0 \n23 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 \n24 3.0 0.0 1.0 0.0 3.0 0.0 0.0 0.0 \n25 0.0 0.0 4.0 0.0 0.0 0.0 0.0 0.0 \n26 0.0 0.0 2.0 1.0 1.0 0.0 0.0 0.0 \n27 0.0 0.0 0.0 2.0 0.0 0.0 0.0 0.0 \n28 3.0 0.0 1.0 3.0 0.0 0.0 0.0 0.0 \n29 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 \n30 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n31 0.0 2.0 0.0 1.0 1.0 0.0 0.0 0.0 \n32 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n33 1.0 0.0 0.0 0.0 0.0 0.0 7.0 0.0 \n34 0.0 0.0 1.0 1.0 0.0 3.0 0.0 0.0 \n35 4.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 \n36 4.0 2.0 0.0 0.0 0.0 1.0 0.0 0.0 \n37 0.0 7.0 0.0 0.0 0.0 0.0 2.0 1.0 \n38 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n39 0.0 0.0 0.0 1.0 0.0 0.0 4.0 1.0 \n40 4.0 1.0 1.0 1.0 0.0 0.0 0.0 0.0 \n41 4.0 5.0 0.0 0.0 0.0 0.0 2.0 0.0 \n42 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 \n43 6.0 6.0 0.0 1.0 2.0 0.0 0.0 0.0 \n44 0.0 0.0 0.0 0.0 0.0 2.0 0.0 1.0 \n45 0.0 0.0 0.0 7.0 0.0 1.0 0.0 0.0 \n46 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n47 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0 \n48 0.0 0.0 0.0 0.0 0.0 2.0 1.0 0.0 \n49 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 \n50 0.0 0.0 0.0 2.0 0.0 0.0 0.0 0.0 \n51 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n52 0.0 1.0 0.0 0.0 0.0 0.0 0.0 
3.0 \n53 0.0 0.0 0.0 0.0 1.0 1.0 0.0 0.0 \n54 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 \n55 0.0 0.0 0.0 0.0 0.0 1.0 3.0 0.0 \n56 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 \n57 3.0 0.0 0.0 0.0 1.0 0.0 2.0 0.0 \n58 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 \n59 0.0 6.0 3.0 0.0 4.0 2.0 0.0 0.0 \n\n[60 rows x 23 columns]\n"
],
[
"hm = sb.heatmap(pt, annot=True, cmap=\"coolwarm\", annot_kws={\"fontsize\": 4})\nhm.set_yticklabels(hm.get_yticklabels(), rotation=0, fontsize=8)\nhm.set_xticklabels(hm.get_xticklabels(), rotation=0, fontsize=8)\n\nplt.title('Orders Concentration Hours')\nplt.ylabel('Minute')\nplt.xlabel('Hour')\nplt.grid(False)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e79d5b8d83ec110bca4cfaada56dbf8206992b44 | 15,576 | ipynb | Jupyter Notebook | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C4. Scrapeando escalas y tarifas - Script.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | 1 | 2021-02-26T13:12:22.000Z | 2021-02-26T13:12:22.000Z | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C4. Scrapeando escalas y tarifas - Script.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | null | null | null | NoteBooks/Curso de WebScraping/Unificado/web-scraping-master/Clases/Módulo 3_ Scraping con Selenium/M3C4. Scrapeando escalas y tarifas - Script.ipynb | Alejandro-sin/Learning_Notebooks | 161d6bed4c7b1d171b45f61c0cc6fa91e9894aad | [
"MIT"
] | null | null | null | 27.814286 | 310 | 0.549884 | [
[
[
"# Módulo 2: Scraping con Selenium\n## LATAM Airlines\n<a href=\"https://www.latam.com/es_ar/\"><img src=\"https://i.pinimg.com/originals/dd/52/74/dd5274702d1382d696caeb6e0f6980c5.png\" width=\"420\"></img></a>\n<br>\n\nVamos a scrapear el sitio de Latam para averiguar datos de vuelos en funcion el origen y destino, fecha y cabina. La información que esperamos obtener de cada vuelo es:\n- Precio(s) disponibles\n- Horas de salida y llegada (duración)\n- Información de las escalas\n\n¡Empecemos!",
"_____no_output_____"
]
],
[
[
"url = 'https://www.latam.com/es_ar/apps/personas/booking?fecha1_dia=20&fecha1_anomes=2019-12&auAvailability=1&ida_vuelta=ida&vuelos_origen=Buenos%20Aires&from_city1=BUE&vuelos_destino=Madrid&to_city1=MAD&flex=1&vuelos_fecha_salida_ddmmaaaa=20/12/2019&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo='",
"_____no_output_____"
],
[
"from selenium import webdriver",
"_____no_output_____"
],
[
"options = webdriver.ChromeOptions()\noptions.add_argument('--incognito')\ndriver = webdriver.Chrome(executable_path='../../chromedriver', options=options)\ndriver.get(url)",
"_____no_output_____"
],
[
"#Usaremos el Xpath para obtener la lista de vuelos\nvuelos = driver.find_elements_by_xpath('//li[@class=\"flight\"]')",
"_____no_output_____"
],
[
"vuelo = vuelos[0]",
"_____no_output_____"
]
],
[
[
"Obtenemos la información de la hora de salida, llegada y duración del vuelo",
"_____no_output_____"
]
],
[
[
"# Hora de salida\nvuelo.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Hora de llegada\nvuelo.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Duración del vuelo\nvuelo.find_element_by_xpath('.//span[@class=\"duration\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"boton_escalas = vuelo.find_element_by_xpath('.//div[@class=\"flight-summary-stops-description\"]/button')\nboton_escalas",
"_____no_output_____"
],
[
"boton_escalas.click()",
"_____no_output_____"
],
[
"segmentos = vuelo.find_elements_by_xpath('//div[@class=\"segments-graph\"]/div[@class=\"segments-graph-segment\"]')\nsegmentos",
"_____no_output_____"
],
[
"escalas = len(segmentos) - 1 #0 escalas si es un vuelo directo",
"_____no_output_____"
]
],
[
[
"# Clase 13\nEn esta clase obtendremos la información de las escalas que se encuentran en el modal que aparece al clickear sobre el botón de escalas",
"_____no_output_____"
]
],
[
[
"segmento = segmentos[0]",
"_____no_output_____"
],
[
"# Origen\nsegmento.find_element_by_xpath('.//div[@class=\"departure\"]/span[@class=\"ground-point-name\"]').text",
"_____no_output_____"
],
[
"# Hora de salida\nsegmento.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')",
"_____no_output_____"
]
],
[
[
"## RETO\nObtener:\n- destino\n- hora de llegada\n- duración del vuelo\n- duración de la escala. *Tip: el último segmento no tendrá esta información*\n- número del vuelo\n- modelo del avión",
"_____no_output_____"
]
],
[
[
"# Destino\nsegmento.find_element_by_xpath('.//div[@class=\"arrival\"]/span[@class=\"ground-point-name\"]').text",
"_____no_output_____"
],
[
"# Hora de llegada\nsegmento.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Duración del vuelo\nsegmento.find_element_by_xpath('.//span[@class=\"duration flight-schedule-duration\"]/time').get_attribute('datetime')",
"_____no_output_____"
],
[
"# Numero del vuelo\nsegmento.find_element_by_xpath('.//span[@class=\"equipment-airline-number\"]').text",
"_____no_output_____"
],
[
"# Modelo de avion\nsegmento.find_element_by_xpath('.//span[@class=\"equipment-airline-material\"]').text",
"_____no_output_____"
],
[
"# Duracion de la escala\nsegmento.find_element_by_xpath('.//div[@class=\"stop connection\"]//p[@class=\"stop-wait-time\"]//time').get_attribute('datetime')",
"_____no_output_____"
]
],
[
[
"## CLASE\nUna vez que hayamos obtenido toda la información, debemos cerrar el modal/pop-up.",
"_____no_output_____"
]
],
[
[
"driver.find_element_by_xpath('//div[@class=\"modal-dialog\"]//button[@class=\"close\"]').click()",
"_____no_output_____"
]
],
[
[
"Por último debemos obtener la información de las tarifas. Para eso, debemos clickear sobre el vuelo (sobre cualquier parte)",
"_____no_output_____"
]
],
[
[
"vuelo.click()",
"_____no_output_____"
]
],
[
[
"La información de los precios para cada tarifa está contenida en una tabla. Los precios en sí están en el footer y podemos sacar los nombres de la clase de cada elemento",
"_____no_output_____"
]
],
[
[
"tarifas = vuelo.find_elements_by_xpath('.//div[@class=\"fares-table-container\"]//tfoot//td[contains(@class, \"fare-\")]')",
"_____no_output_____"
],
[
"precios = []\nfor tarifa in tarifas:\n nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')\n moneda = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"currency-symbol\"]').text\n valor = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"value\"]').text \n dict_tarifa={nombre:{'moneda':moneda, 'valor':valor}}\n precios.append(dict_tarifa)\n print(dict_tarifa)",
"{'LIGHT': {'moneda': 'US$', 'valor': '1282,40'}}\n{'PLUS': {'moneda': 'US$', 'valor': '1335,90'}}\n{'TOP': {'moneda': 'US$', 'valor': '1773,50'}}\n"
]
],
[
[
"Será de gran utilidad armar funciones que resuelvan la extracción de información de cada sección de la página. Por eso te propongo que armes 3 funciones de las cuales te dejo las estructuras:",
"_____no_output_____"
],
[
"## RETO\nArmar funciones para obtener los datos de las escalas y las tarifas. \nTe dejo los prototipos:",
"_____no_output_____"
]
],
[
[
"def obtener_precios(vuelo):\n tarifas = vuelo.find_elements_by_xpath(\n './/div[@class=\"fares-table-container\"]//tfoot//td[contains(@class, \"fare-\")]')\n precios = []\n for tarifa in tarifas:\n nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')\n moneda = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"currency-symbol\"]').text\n valor = tarifa.find_element_by_xpath('.//span[@class=\"price\"]/span[@class=\"value\"]').text \n dict_tarifa={nombre:{'moneda':moneda, 'valor':valor}}\n precios.append(dict_tarifa)\n return precios",
"_____no_output_____"
],
[
"def obtener_datos_escalas(vuelo):\n segmentos = vuelo.find_elements_by_xpath('//div[@class=\"segments-graph\"]/div[@class=\"segments-graph-segment\"]')\n info_escalas = []\n for segmento in segmentos:\n # Origen\n origen = segmento.find_element_by_xpath(\n './/div[@class=\"departure\"]/span[@class=\"ground-point-name\"]').text\n # Hora de salida\n dep_time = segmento.find_element_by_xpath(\n './/div[@class=\"departure\"]/time').get_attribute('datetime')\n # Destino\n destino = segmento.find_element_by_xpath(\n './/div[@class=\"arrival\"]/span[@class=\"ground-point-name\"]').text\n # Hora de llegada\n arr_time = segmento.find_element_by_xpath(\n './/div[@class=\"arrival\"]/time').get_attribute('datetime')\n # Duración del vuelo\n duracion_vuelo = segmento.find_element_by_xpath(\n './/span[@class=\"duration flight-schedule-duration\"]/time').get_attribute('datetime')\n # Numero del vuelo\n numero_vuelo = segmento.find_element_by_xpath(\n './/span[@class=\"equipment-airline-number\"]').text\n # Modelo de avion\n modelo_avion = segmento.find_element_by_xpath(\n './/span[@class=\"equipment-airline-material\"]').text\n # Duracion de la escala\n if segmento != segmentos[-1]:\n duracion_escala = segmento.find_element_by_xpath(\n './/div[@class=\"stop connection\"]//p[@class=\"stop-wait-time\"]//time').get_attribute('datetime')\n else:\n duracion_escala = ''\n\n # Armo un diccionario para almacenar los datos\n data_dict={'origen': origen, \n 'dep_time': dep_time, \n 'destino': destino,\n 'arr_time': arr_time,\n 'duracion_vuelo': duracion_vuelo,\n 'numero_vuelo': numero_vuelo,\n 'modelo_avion': modelo_avion,\n 'duracion_escala': duracion_escala}\n info_escalas.append(data_dict)\n \n return info_escalas",
"_____no_output_____"
],
[
"def obtener_tiempos(vuelo):\n # Hora de salida\n salida = vuelo.find_element_by_xpath('.//div[@class=\"departure\"]/time').get_attribute('datetime')\n # Hora de llegada\n llegada = vuelo.find_element_by_xpath('.//div[@class=\"arrival\"]/time').get_attribute('datetime')\n # Duracion\n duracion = vuelo.find_element_by_xpath('.//span[@class=\"duration\"]/time').get_attribute('datetime')\n tiempos = {'hora_salida': salida, 'hora_llegada': llegada, 'duracion': duracion}\n return tiempos",
"_____no_output_____"
],
[
"driver.close()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79d63b771b3502f40a996da532fa4e1d2cbd091 | 6,908 | ipynb | Jupyter Notebook | admin/ocean heat/Untitled.ipynb | HarryStevens/2018-06-18-four-globes | 615a005c0a5251f6d94c4110a97ef70898bb6608 | [
"Apache-2.0"
] | null | null | null | admin/ocean heat/Untitled.ipynb | HarryStevens/2018-06-18-four-globes | 615a005c0a5251f6d94c4110a97ef70898bb6608 | [
"Apache-2.0"
] | null | null | null | admin/ocean heat/Untitled.ipynb | HarryStevens/2018-06-18-four-globes | 615a005c0a5251f6d94c4110a97ef70898bb6608 | [
"Apache-2.0"
] | null | null | null | 50.423358 | 327 | 0.558338 | [
[
[
"import xarray as xr\nmar = xr.open_dataset('heat_content_anomaly_0-700_yearly.nc', decode_times=False)\nprint(mar)",
"<xarray.Dataset>\nDimensions: (depth: 1, lat: 180, lon: 360, nbounds: 2, time: 63)\nCoordinates:\n * lat (lat) float32 -89.5 -88.5 -87.5 -86.5 -85.5 -84.5 ...\n * lon (lon) float32 -179.5 -178.5 -177.5 -176.5 -175.5 ...\n * time (time) float32 6.0 18.0 30.0 42.0 54.0 66.0 78.0 ...\n * depth (depth) int64 0\n * nbounds (nbounds) int64 0 1\nData variables:\n crs int32 -2147483647\n lat_bnds (lat, nbounds) float32 -90.0 -89.0 -89.0 -88.0 -88.0 ...\n lon_bnds (lon, nbounds) float32 -180.0 -179.0 -179.0 -178.0 ...\n depth_bnds (depth, nbounds) float32 0.0 700.0\n climatology_bounds (time, nbounds) float32 0.0 12.0 12.0 24.0 24.0 36.0 ...\n h18_hc (time, depth, lat, lon) float64 nan nan nan nan nan ...\n yearl_h22_WO (time) float32 -3.201 -2.608 -4.613 -1.533 -2.189 ...\n yearl_h22_se_WO (time) float32 1.7 0.719 0.396 0.687 0.863 0.499 ...\n yearl_h22_NH (time) float32 -1.439 -1.843 -2.242 -0.19 -0.554 ...\n yearl_h22_se_NH (time) float32 0.937 0.385 0.186 0.163 0.545 0.695 ...\n yearl_h22_SH (time) float32 -1.762 -0.765 -2.371 -1.343 -1.636 ...\n yearl_h22_se_SH (time) float32 0.771 0.533 0.278 0.676 0.419 0.586 ...\n yearl_h22_AO (time) float32 -1.188 -1.441 -2.544 -0.97 -2.425 ...\n yearl_h22_se_AO (time) float32 0.599 0.393 0.823 0.195 0.284 0.384 ...\n yearl_h22_NA (time) float32 -0.559 -0.918 -1.226 -0.334 -1.27 ...\n yearl_h22_se_NA (time) float32 0.48 0.222 0.36 0.1 0.111 0.293 0.327 ...\n yearl_h22_SA (time) float32 -0.629 -0.523 -1.318 -0.636 -1.155 ...\n yearl_h22_se_SA (time) float32 0.17 0.214 0.487 0.149 0.185 0.258 ...\n yearl_h22_PO (time) float32 -1.877 -0.967 -1.593 -0.635 0.316 ...\n yearl_h22_se_PO (time) float32 1.321 0.556 0.539 0.58 0.345 0.115 ...\n yearl_h22_NP (time) float32 -0.984 -0.828 -0.78 0.16 0.656 0.413 ...\n yearl_h22_se_NP (time) float32 0.608 0.289 0.249 0.261 0.405 0.392 ...\n yearl_h22_SP (time) float32 -0.893 -0.139 -0.813 -0.795 -0.34 ...\n yearl_h22_se_SP (time) float32 0.715 0.4 0.356 0.566 0.137 0.442 ...\n yearl_h22_IO (time) 
float32 -0.141 -0.198 -0.467 0.07 -0.081 ...\n yearl_h22_se_IO (time) float32 0.129 0.247 0.394 0.34 0.3 0.268 ...\n yearl_h22_NI (time) float32 0.098 -0.095 -0.227 -0.018 0.059 ...\n yearl_h22_se_NI (time) float32 0.088 0.073 0.078 0.076 0.053 0.099 ...\n yearl_h22_SI (time) float32 -0.24 -0.104 -0.24 0.088 -0.14 -0.486 ...\n yearl_h22_se_SI (time) float32 0.061 0.266 0.325 0.367 0.256 0.175 ...\n basin_mask (lat, lon) float64 nan nan nan nan nan nan nan nan ...\nAttributes:\n Conventions: CF-1.6\n title: Ocean Heat Content anomalies from WOA09 : heat_content_anomaly 0-700 m yearly 1.00 degree\n summary: Mean ocean variable anomaly from in situ profile data\n references: Levitus, S., J. I. Antonov, T. P. Boyer, O. K. Baranova, H. E. Garcia, R. A. Locarnini, A.V. Mishonov, J. R. Reagan, D. Seidov, E. S. Yarosh, M. M. Zweng, 2012: World Ocean heat content and thermosteric sea level change (0-2000 m) 1955-2010. Geophys. Res. Lett. , 39, L10603, doi:10.1029/2012GL051106\n institution: National Oceanographic Data Center(NODC)\n comment: \n id: heat_content_anomaly_0-700_yearly.nc\n naming_authority: gov.noaa.nodc\n time_coverage_start: 1955-01-01\n time_coverage_duration: P63Y\n time_coverage_resolution: P01Y\n geospatial_lat_min: -90.0\n geospatial_lat_max: 90.0\n geospatial_lon_min: -180.0\n geospatial_lon_max: 180.0\n geospatial_vertical_min: 0.0\n geospatial_vertical_max: 700.0\n geospatial_lat_units: degrees_north\n geospatial_lat_resolution: 1.00 degrees\n geospatial_lon_units: degrees_east\n geospatial_lon_resolution: 1.00 degrees\n geospatial_vertical_units: m\n geospatial_vertical_resolution: \n geospatial_vertical_positive: down\n creator_name: Ocean Climate Laboratory\n creator_email: [email protected]\n creator_url: http://www.nodc.noaa.gov\n project: World Ocean Database\n processing_level: processed\n keywords: <ISO_TOPIC_Category> Oceans</ISO_TOPIC_Category>\n keywords_vocabulary: ISO 19115\n standard_name_vocabulary: CF-1.6\n contributor_name: Ocean 
Climate Laboratory\n contributor_role: Calculation of anomalies\n featureType: Grid\n cdm_data_type: Grid\n nodc_template_version: NODC_NetCDF_Grid_Template_v1.0\n date_created: 2018-01-18 \n date_modified: 2018-01-18 \n publisher_name: US NATIONAL OCEANOGRAPHIC DATA CENTER\n publisher_url: http://www.nodc.noaa.gov/\n publisher_email: [email protected]\n license: These data are openly available to the public. Please acknowledge the use of these data with the text given in the acknowledgment attribute.\n Metadata_Conventions: Unidata Dataset Discovery v1.0\n metadata_link: http://www.nodc.noaa.gov/OC5/3M_HEAT_CONTENT/\n"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e79d6536351e8e340527cc37d0b05d3431a5aa71 | 346,592 | ipynb | Jupyter Notebook | Automate the Boring Stuff with Python Ch13.ipynb | pgaods/Automate-the-Boring-Stuff-with-Python | 440c04e888911aec2f4707c2be328e3f37a27160 | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python Ch13.ipynb | pgaods/Automate-the-Boring-Stuff-with-Python | 440c04e888911aec2f4707c2be328e3f37a27160 | [
"MIT"
] | null | null | null | Automate the Boring Stuff with Python Ch13.ipynb | pgaods/Automate-the-Boring-Stuff-with-Python | 440c04e888911aec2f4707c2be328e3f37a27160 | [
"MIT"
] | null | null | null | 471.553741 | 170,717 | 0.933792 | [
[
[
"In this chapter, we study how to work with PDF and Microsoft Word files using Python. PDF and Word documents are binary files, which makes them much more complex than plaintext files. In addition to text, they store lots of font, color, and layout information. If you want your programs to read or write to PDFs or Word documents, you’ll need to do more than simply pass their filenames to open().",
"_____no_output_____"
]
],
[
[
"!pip install PyPDF2",
"Collecting PyPDF2\n Downloading PyPDF2-1.26.0.tar.gz (77 kB)\nBuilding wheels for collected packages: PyPDF2\n Building wheel for PyPDF2 (setup.py): started\n Building wheel for PyPDF2 (setup.py): finished with status 'done'\n Created wheel for PyPDF2: filename=PyPDF2-1.26.0-py3-none-any.whl size=61087 sha256=21f0c54caa5aa7c4fe4f0b5eaaa4baf06d5d66e2d6a6cbfc6f741ae91ea83389\n Stored in directory: c:\\users\\pgao\\appdata\\local\\pip\\cache\\wheels\\80\\1a\\24\\648467ade3a77ed20f35cfd2badd32134e96dd25ca811e64b3\nSuccessfully built PyPDF2\nInstalling collected packages: PyPDF2\nSuccessfully installed PyPDF2-1.26.0\n"
]
],
[
[
"PDF stands for 'Portable Document Format' and uses the .pdf file extension. Although PDFs support many features, this chapter will focus on the two things you’ll be doing most often with them: reading text content from PDFs and crafting new PDFs from existing documents.\n\nPDFs are actually very hard to work with in Python. While PDF files are great for laying out text in a way that’s easy for people to print and read, they’re not straightforward for software to parse into plain text. As such, 'PyPDF2' might make mistakes when extracting text from a PDF and may even be unable to open some PDFs at all. There isn’t much you can do about this, unfortunately. PyPDF2 may simply be unable to work with some of your particular PDF files.\n\nPyPDF2 does not have a way to extract images, charts, or other media from PDF documents, but it can extract text and return it as a Python string.",
"_____no_output_____"
]
],
[
[
"import PyPDF2\nimport os",
"_____no_output_____"
],
[
"path='C:\\\\Users\\\\pgao\\\\Documents\\\\PGZ Documents\\\\Programming Workshop\\PYTHON\\\\Python Books\\\\Automate the Boring Stuff with Python\\\\Datasets and Files'\nos.chdir(path)",
"_____no_output_____"
],
[
"pdfFileObj = open('meetingminutes.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\nprint(type(pdfReader))\nprint('Number of the pages for the current PDF file: ', pdfReader.numPages)\npageObj = pdfReader.getPage(0) # getting a 'Page' object by calling the getPage() method (here we get the first page)\npageObj.extractText()",
"<class 'PyPDF2.pdf.PdfFileReader'>\nNumber of the pages for the current PDF file: 19\n"
]
],
[
[
"As you see from the example above, text extractions aren't always perfect: The text Charles E. \"Chas\" Roemer, President from the PDF is absent from the string returned by extractText(), and the spacing is sometimes off. Still, this approximation of the PDF text content may be good enough for your program in many cases. ",
"_____no_output_____"
],
[
"Some PDF documents have an encryption feature that will keep them from being read until whoever is opening the document provides a password. All 'PdfFileReader' objects have an 'isEncrypted' attribute that is 'True' if the PDF is encrypted and 'False' if it isn’t. Any attempt to call a function that reads the file before it has been decrypted with the correct password will result in an error.\n\nTo read an encrypted PDF, we can call the decrypt() function and pass the password as a string. After you call decrypt() with the correct password, you’ll see that calling getPage() no longer causes an error. If given the wrong password, the decrypt() function will return 0 and getPage() will continue to fail. Note that the decrypt() method decrypts only the 'PdfFileReader' object, not the actual PDF file. After your program terminates, the file on your hard drive remains encrypted. Your program will have to call decrypt() again the next time it is run. Below is an example:",
"_____no_output_____"
]
],
[
[
"pdfReader = PyPDF2.PdfFileReader(open('encrypted.pdf', 'rb'))\nprint(pdfReader.isEncrypted)\ntry:\n pdfReader.getPage(0)\nexcept:\n print(\"PdfReadError: file has not been decrypted\")",
"True\nPdfReadError: file has not been decrypted\n"
],
[
"pdfReader.decrypt('rosebud') # the password is rosebud\npageObj = pdfReader.getPage(0)\nprint(pageObj)",
"{'/CropBox': [0, 0, 612, 792], '/Parent': IndirectObject(4, 0), '/Type': '/Page', '/Contents': [IndirectObject(946, 0), IndirectObject(947, 0), IndirectObject(948, 0), IndirectObject(949, 0), IndirectObject(950, 0), IndirectObject(951, 0), IndirectObject(952, 0), IndirectObject(953, 0)], '/Resources': {'/ExtGState': {'/GS0': IndirectObject(954, 0)}, '/XObject': {'/Im0': IndirectObject(955, 0)}, '/ColorSpace': {'/CS1': IndirectObject(956, 0), '/CS2': IndirectObject(956, 0), '/CS0': IndirectObject(6, 0)}, '/Font': {'/TT2': IndirectObject(957, 0), '/TT1': IndirectObject(958, 0), '/TT0': IndirectObject(959, 0), '/TT5': IndirectObject(960, 0), '/TT4': IndirectObject(961, 0), '/TT3': IndirectObject(962, 0)}}, '/MediaBox': [0, 0, 612, 792], '/StructParents': 0, '/Rotate': 0}\n"
]
],
[
[
"Notice that in the original package, there is a bug. If you use the original package, you may encounter an error. The error is the following: after decrypting the 'PdfFileReader' object, calling pdfReader.getPage(0) raises an error with the message: \n\n 'IndexError: list index out of range'.\n \nThe reason is because there is an exception to the source code. To fix this, you will need to go to the location where the library 'PyPDF2' is located. The actual code is in the Python script \"pdf.py\". What you need to do is to follow the instruction below. The line in red needs to be deleted and the line in green must be added. \n\nThe complete solution to the issue is explained in the following site: https://github.com/mstamy2/PyPDF2/issues/327.",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(\"ch13_snapshot_1.jpg\", width=900, height=800) ",
"_____no_output_____"
]
],
[
[
"The counterpart in the package to 'PdfFileReader' objects is 'PdfFileWriter' objects, which can create new PDF files. But 'PyPDF2' cannot write arbitrary text to a PDF like Python can do with plaintext files. Instead, the PDF-writing capabilities are limited to copying pages from other PDFs, rotating pages, overlaying pages, and encrypting files.\n\n'PyPDF2' doesn’t allow you to directly edit a PDF. Instead, you have to create a new PDF and then copy contents over from an existing document. The examples in this section will follow this general approach: 1) open one or more existing PDFs (the source PDFs) into 'PdfFileReader' objects. 2) Create a new 'PdfFileWriter' object. 3) Copy pages from the 'PdfFileReader' objects into the 'PdfFileWriter' object. 4) Finally, use the 'PdfFileWriter' object to write the output.\n\nCreating a 'PdfFileWriter' object generates only a value that represents a PDF document in Python. It doesn’t create the actual PDF file. For that, you must call the write() method from 'PdfFileWriter’. The write() method takes a regular 'File' object that has been opened in write-binary mode. You can get such a 'File' object by calling Python’s open() function with two arguments: the string of what you want the PDF’s filename to be and 'wb' to indicate the file should be opened in write-binary mode.",
"_____no_output_____"
],
[
"Now let's start with copying pages. 'PyPDF2' can help us copy pages from one PDF document to another. This allows us to combine multiple PDF files, cut unwanted pages, or reorder pages. Below is an example:",
"_____no_output_____"
]
],
[
[
"pdf1File = open('meetingminutes.pdf', 'rb')\npdf2File = open('meetingminutes2.pdf', 'rb')\npdf1Reader = PyPDF2.PdfFileReader(pdf1File)\npdf2Reader = PyPDF2.PdfFileReader(pdf2File)\npdfWriter = PyPDF2.PdfFileWriter() # creating a blank PDF document here\n\nfor pageNum in range(pdf1Reader.numPages): # copy all the pages from the PDF and add them to the 'PdfFileWriter' object\n pageObj = pdf1Reader.getPage(pageNum)\n pdfWriter.addPage(pageObj)\n\nfor pageNum in range(pdf2Reader.numPages): # copy all the pages from the PDF and add them to the 'PdfFileWriter' object\n pageObj = pdf2Reader.getPage(pageNum)\n pdfWriter.addPage(pageObj)\n\npdfOutputFile = open('combinedminutes.pdf', 'wb')\npdfWriter.write(pdfOutputFile)\npdfOutputFile.close()\npdf1File.close()\npdf2File.close()",
"_____no_output_____"
]
],
[
[
"One cautionary note: 'PyPDF2' cannot insert pages in the middle of a 'PdfFileWriter' object. The addPage() method will only add pages to the end. Also keep in mind that the 'File' object passed to PyPDF2.PdfFileReader() needs to be opened in read-binary mode by passing 'rb' as the second argument to open(). Likewise, the 'File' object passed to PyPDF2.PdfFileWriter() needs to be opened in write-binary mode with 'wb'.",
"_____no_output_____"
],
[
"We now talk about rotating PDF files. This is very useful if you have a scanned copy of PDF files from someone else and you want to rotate the pages. The pages can be rotated in 90-degree increments with the rotateClockwise() and rotateCounterClockwise() methods. Below is an example. The resulting PDF will have one page, rotated 90 degrees clockwise. ",
"_____no_output_____"
]
],
[
[
"minutesFile = open('meetingminutes.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(minutesFile)\npage = pdfReader.getPage(0)\npage.rotateClockwise(90)\n\npdfWriter = PyPDF2.PdfFileWriter() # creating a blank PDF output file\npdfWriter.addPage(page) # adding the rotated page\nresultPdfFile = open('rotatedPage.pdf', 'wb')\npdfWriter.write(resultPdfFile)\nresultPdfFile.close()\nminutesFile.close()",
"_____no_output_____"
]
],
[
[
"Now let's study overlaying pages. 'PyPDF2' can overlay the contents of one page over another, which is useful for adding a logo, timestamp, or watermark to a page. With Python, it’s easy to add watermarks to multiple files and only to pages your program specifies.\n\nHere in the example below, we make a 'PdfFileReader' object of 'meetingminutes.pdf'. We first call the getPage(0) method to get a 'Page' object for the first page and store this object in 'minutesFirstPage'. We then make a 'PdfFileReader' object for 'watermark.pdf' and call mergePage() on 'minutesFirstPage'. The argument we pass to mergePage() is a 'Page' object for the first page of 'watermark.pdf'.\n\nNow that we’ve called mergePage() on 'minutesFirstPag', 'minutesFirstPage' represents the watermarked first page. We make a 'PdfFileWriter' object and add the watermarked first page. Then we loop through the rest of the pages in 'meetingminutes.pdf' and add them to the 'PdfFileWriter' object. Finally, we open a new PDF file called 'watermarkedCover.pdf' and write the contents of the 'PdfFileWriter' to the new PDF. Our new PDF, called 'watermarkedCover.pdf', has all the contents of the 'meetingminutes.pdf' with its first page watermarked.",
"_____no_output_____"
]
],
[
[
"minutesFile = open('meetingminutes.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(minutesFile)\nminutesFirstPage = pdfReader.getPage(0)\npdfWatermarkReader = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))\nminutesFirstPage.mergePage(pdfWatermarkReader.getPage(0))\npdfWriter = PyPDF2.PdfFileWriter()\npdfWriter.addPage(minutesFirstPage)\n\nfor pageNum in range(1, pdfReader.numPages):\n pageObj = pdfReader.getPage(pageNum)\n pdfWriter.addPage(pageObj)\nresultPdfFile = open('watermarkedCover.pdf', 'wb')\npdfWriter.write(resultPdfFile)\nminutesFile.close()\nresultPdfFile.close()",
"_____no_output_____"
]
],
[
[
"Lastly, a 'PdfFileWriter' object can also add encryption to a PDF document. Below is an example. The key is to use the encrypy() method. In general, PDFs can have a user password (allowing you to view the PDF) and an owner password (allowing you to set permissions for printing, commenting, extracting text, and other features). The user password and owner password are the first and second arguments to encrypt(), respectively. If only one string argument is passed to encrypt(), it will be used for both passwords.\n\nIn this example, we copied the pages of 'meetingminutes.pdf' to a 'PdfFileWriter' object. We encrypted the 'PdfFileWriter' with the password 'swordfish', opened a new PDF called 'encryptedminutes.pdf', and wrote the contents of the 'PdfFileWriter' to the new PDF. Before anyone can view 'encryptedminutes.pdf', they’ll have to enter this password.",
"_____no_output_____"
]
],
[
[
"pdfFile = open('meetingminutes.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(pdfFile)\npdfWriter = PyPDF2.PdfFileWriter()\nfor pageNum in range(pdfReader.numPages):\n pdfWriter.addPage(pdfReader.getPage(pageNum))\n\npdfWriter.encrypt('swordfish') # encrypting with a password\nresultPdf = open('encryptedminutes.pdf', 'wb') # this file now is encrypted with the password 'swordfish'\npdfWriter.write(resultPdf)\nresultPdf.close()",
"_____no_output_____"
]
],
[
[
"We now study how to manipulate Microsoft Word documents. This is achieved through the \"Python-Docx\" package, which needs to be installed first. The full documentation for this package is available at https://python-docx.readthedocs.org/.",
"_____no_output_____"
]
],
[
[
"!pip install python-docx",
"Requirement already satisfied: python-docx in c:\\programdata\\anaconda3\\lib\\site-packages\nRequirement already satisfied: lxml>=2.3.2 in c:\\programdata\\anaconda3\\lib\\site-packages (from python-docx)\n"
]
],
[
[
"Although there is a version of Word for OS X, this chapter will focus on Word for Windows. Compared to plaintext, \".docx\" files have a lot of structure. This structure is represented by three different data types in 'Python-Docx'. At the highest level, a 'Document' object represents the entire document. The 'Document' object contains a list of 'Paragraph' objects for the paragraphs in the document. Each of these 'Paragraph' objects contains a list of one or more 'Run' objects. For example, the single-sentence paragraph in the next example has four 'Runs':",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(\"ch13_snapshot_2.jpg\") ",
"_____no_output_____"
]
],
[
[
"You can think of each run as a block of strings that has its own special properties. This is because the text in a (Microsoft) Word document is more than just a string. It has font, size, color, and other styling information associated with it. A 'style' in Word is a collection of these attributes. A 'Run' object is a contiguous run of text with the same 'style'. A new 'Run' object is needed whenever the text 'style' changes.\n\nNow let's read in a Word document and parse each objects:",
"_____no_output_____"
]
],
[
[
"import docx",
"_____no_output_____"
],
[
"doc = docx.Document('demo.docx')\nprint('Number of paragraph objects: ', len(doc.paragraphs))\nob1=doc.paragraphs[0].text\nprint(type(ob1)) # string\nprint(ob1)\nob2=doc.paragraphs[1].text\nprint(type(ob2)) # string\nprint(ob2)\nob3=doc.paragraphs[1].runs\nprint(type(ob3)) # list\nprint(ob3)",
"Number of paragraph objects: 7\n<class 'str'>\nDocument Title\n<class 'str'>\nA plain paragraph with some bold and some italic\n<class 'list'>\n[<docx.text.run.Run object at 0x00000000088A32E8>, <docx.text.run.Run object at 0x00000000088A3240>, <docx.text.run.Run object at 0x00000000088A37F0>, <docx.text.run.Run object at 0x00000000088A37B8>, <docx.text.run.Run object at 0x00000000088A38D0>]\n"
],
[
"print(doc.paragraphs[1].runs[0].text)\nprint(doc.paragraphs[1].runs[1].text)\nprint(doc.paragraphs[1].runs[2].text)\nprint(doc.paragraphs[1].runs[3].text)\nprint(doc.paragraphs[1].runs[4].text)",
"A plain paragraph with\n some \nbold\n and some \nitalic\n"
]
],
[
[
"If you care only about the text, not the styling information, in the Word document, you can use the user-defined getText() function. It accepts a filename of a '.docx' file and returns a single string value of its text:",
"_____no_output_____"
]
],
[
[
"def getText(filename):\n doc = docx.Document(filename)\n fullText = []\n for paragraph in doc.paragraphs:\n fullText.append(paragraph.text)\n return '\\n'.join(fullText)",
"_____no_output_____"
]
],
[
[
"The getText() function opens the Word document, loops over all the 'Paragraph' objects in the paragraphs list, and then appends their text to the list in the 'fullText' list (originally set to be empty). After the loop, the strings in 'fullText' are joined together with newline characters.",
"_____no_output_____"
]
],
[
[
"print(getText('demo.docx'))",
"Document Title\nA plain paragraph with some bold and some italic\nHeading, level 1\nIntense quote\nfirst item in unordered list\nfirst item in ordered list\n\n\n"
]
],
[
[
"Microsoft Word and other word processors use styles to keep the visual presentation of similar types of text consistent and easy to change. For example, perhaps you want to set body paragraphs in 11-point, Times New Roman, left-justified, ragged-right text. You can create a style with these settings and assign it to all body paragraphs. Then, if you later want to change the presentation of all body paragraphs in the document, you can just change the style, and all those paragraphs will be automatically updated.\n\nFor Word documents, there are three types of styles:\n\n 1. Paragraph styles can be applied to 'Paragraph' objects.\n 2. Character styles can be applied to 'Run' objects.\n 3. Linked styles can be applied to both kinds of objects. \n \nYou can give both 'Paragraph' and 'Run' objects styles by setting their 'style' attribute to a string. This string should be the name of a style. If 'style' is set to 'None', then there will be no style associated with the 'Paragraph' or 'Run' object.\n\nThe string values for the default Word styles are as follows: 'Normal', 'Heading5', 'ListBullet', 'ListParagraph', 'BodyText', 'Heading6', 'ListBullet2', 'MacroText', 'BodyText2', 'Heading7', 'ListBullet3', 'NoSpacing', 'BodyText3', 'Heading8', 'ListContinue', 'Quote', 'Caption', 'Heading9', 'ListContinue2', 'Subtitle', 'Heading1', 'IntenseQuote', 'ListContinue3', 'TOCHeading', 'Heading2', 'List', 'ListNumber', 'Title', 'Heading3', 'List2', 'ListNumber2', 'Heading4', 'List3', and 'ListNumber3'.\n\nIn some some early version of the package, when setting the 'style' attribute, we cannot use spaces in the style name. For example, while the style name may be 'Subtle Emphasis', you should set the 'style' attribute to the string value 'SubtleEmphasis' instead of using the 'Subtle Emphasis' string with empty space in between. Including spaces will cause Word to misread the style name and not apply it. 
But this type of phenomenon depends on what version of the package you are using and refer to the specific documentation. \n\nWhen using a linked style for a 'Run' object, you will need to add the string 'Char' to the end of its name. For example, to set the 'Quote' linked style for a 'Paragraph' object, you would use \"paragraphObj.style = 'Quote'\", but for a 'Run' object, you would use \"runObj.style = 'Quote Char'\".\n\n'Run' objects can be further styled using text attributes. Each attribute can be set to one of three values: 'True' (the attribute is always enabled, no matter what other styles are applied to the run), 'False' (the attribute is always disabled), or 'None' (defaults to whatever the run’s style is set to).\n\nBelow lists some of the text attributes that can be set on 'Run' objects:",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image\nImage(\"ch13_snapshot_3.jpg\") ",
"_____no_output_____"
]
],
[
[
"For example, to change the styles of demo.docx, The following commands will help us get the styles for the document and change styles based on different 'Paragraph' objects and 'Run' objects. \n\nHere in the example below, we use the text and style attributes to easily see what’s in the paragraphs in our document. We can see that it’s simple to divide a paragraph into runs and access each run individiaully. So we get the first, second, and fourth runs in the second paragraph, style each run, and save the results to a new document.",
"_____no_output_____"
]
],
[
[
"doc = docx.Document('demo.docx')\nprint('doc: ', doc.paragraphs[0].text) # 'Document Title'\nprint('The style of the paragraph: ', doc.paragraphs[0].style) # 'Title'\n\ntupleobject=(doc.paragraphs[1].runs[0].text, doc.paragraphs[1].runs[1].text, doc.paragraphs[1].runs[2].text, doc.paragraphs[1].runs[3].text)\nprint(tupleobject)",
"doc: Document Title\nThe style of the paragraph: _ParagraphStyle('Title') id: 143206832\n('A plain paragraph with', ' some ', 'bold', ' and some ')\n"
],
[
"doc.paragraphs[0].style.name \nprint(doc.paragraphs[0].style.name) \ndoc.paragraphs[0].style='Body Text'\nprint(doc.paragraphs[0].style.name) \ndoc.paragraphs[1].runs[0].style = 'Quote Char'\ndoc.paragraphs[1].runs[1].underline = True\ndoc.paragraphs[1].runs[3].underline = True\ndoc.save('restyled.docx')",
"Title\nBody Text\n"
]
],
[
[
"Now let's study how to write Word document using Python. To do, the most important methods include docx.Document(), which is to return a new, blank Word 'Document' object. In addition, the add_paragraph() document method adds a new paragraph of text to the document and returns a reference to the 'Paragraph' object that was added. When you’re done adding text, you may pass a filename string to the save() document method to save the 'Document' object to a file.\n\nIn a similar fashion, calling add_heading() adds a paragraph with one of the heading styles. The arguments to add_heading() are a string of the heading text and an integer from 0 to 4. The integer 0 makes the heading the 'Title' style, which is used for the top of the document. Integers 1 to 4 are for various heading levels, with 1 being the main heading and 4 the lowest subheading. The add_heading() function returns a 'Paragraph' object to save you the step of extracting it from the 'Document' object as a separate step.",
"_____no_output_____"
]
],
[
[
"doc = docx.Document()\ndoc.add_paragraph('Hello world!', 'Title') # adding a title\nparaObj1 = doc.add_paragraph('This is a second paragraph.')\nparaObj2 = doc.add_paragraph('This is a yet another paragraph.')\nparaObj1.add_run(' This text is being added to the second paragraph.')\ndoc.add_heading('Header 0', 0)\ndoc.add_heading('Header 1', 1)\ndoc.add_heading('Header 2', 2)\ndoc.add_heading('Header 3', 3)\ndoc.add_heading('Header 4', 4)\ndoc.save('multipleParagraphs.docx')",
"_____no_output_____"
]
],
[
[
"To add a line break (rather than starting a whole new paragraph), you can call the add_break() method on the 'Run' object you want to have the break appear after. To create page break, you can use the 'docx.enum.text.WD_BREAK.PAGE' argument in the add_break() method. For details can be found here: \n\n https://stackoverflow.com/questions/37608315/python-attributeerror-module-object-has-no-attribute-wd-break\n \nLast but not least, let's talk about inserting pictures. 'Document' objects have an add_picture() method that will let you add an image to the end of the document. Say you have a file in the current working directory. You can add the picture file (say a PNG or JPG file) to the end of your document with a width and height (Word can use both imperial and metric units).\n\nThe example below creates a two-page Word document with 'This is on the first page!' on the first page and 'This is on the second page!' on the second. Finally, we add in a picture with a width of one inch and a height of 4 centimeters. Even though there was still plenty of space on the first page after the text 'This is on the first page!', we forced the next paragraph to begin on a new page by inserting a page break after the first run of the first paragraph:",
"_____no_output_____"
]
],
[
[
"doc = docx.Document()\ndoc.add_paragraph('This is on the first page!')\ndoc.paragraphs[0].runs[0].add_break(docx.enum.text.WD_BREAK.PAGE) # adding a page break\ndoc.add_paragraph('This is on the second page!')\ndoc.add_picture(\"ch13_snapshot_3.jpg\", width=docx.shared.Inches(1), height=docx.shared.Cm(4)) # width 1 inch and height 4 cm\ndoc.save('twoPage.docx')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e79d74a7a9e1c56e86e07fa75c58b92e1cb16adc | 2,112 | ipynb | Jupyter Notebook | examples/gan.ipynb | cosmic-cortex/pytorch-trainer | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | examples/gan.ipynb | cosmic-cortex/pytorch-trainer | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | examples/gan.ipynb | cosmic-cortex/pytorch-trainer | 9f44c8a500a4345d81feac14b6b200c5d190283a | [
"MIT"
] | null | null | null | 23.466667 | 122 | 0.548769 | [
[
[
"import torch\nimport torch.nn as nn\nimport torchvision.transforms as T\n\nfrom torchkit.models.vision.GAN.gan import Discriminator, Generator\nfrom torchkit.tools.wrapper import GAN\n\nfrom torch.optim import Adam\nfrom torchvision.datasets import MNIST\n\ndataset = MNIST('./data/MNIST', download=True, transform=T.ToTensor())\ndevice = torch.device('cuda:2')",
"_____no_output_____"
],
[
"g = Generator(100, (28, 28))\ng_opt = Adam(g.parameters(), 1e-3)\nd = Discriminator((28, 28))\nd_opt = Adam(d.parameters(), 1e-3)\nd_loss = nn.CrossEntropyLoss()\n\ngan = GAN(g=g, g_opt=g_opt,\n d=d, d_loss=d_loss, d_opt=d_opt,\n noise_shape=100, checkpoint_folder='../checkpoints/GAN_test',\n device=device)",
"_____no_output_____"
],
[
"gan.fit_dataset(dataset, n_epochs=100, n_batch=500, shuffle=True, long_gt=True, verbose=True)",
"{'epoch': 1, 'd_real_loss': 0.361098991582791, 'd_fake_loss': 0.31520444303750994, 'g_loss': 0.3138377378384272}\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e79d86759d1ca419e5fd12fbb7ae8eb79727a182 | 85,252 | ipynb | Jupyter Notebook | sameer/database_construction_(README).ipynb | dekkerlab/matrix_shared | 233c215503e6687ddaebf9197e9dfa1043e41eff | [
"MIT"
] | null | null | null | sameer/database_construction_(README).ipynb | dekkerlab/matrix_shared | 233c215503e6687ddaebf9197e9dfa1043e41eff | [
"MIT"
] | null | null | null | sameer/database_construction_(README).ipynb | dekkerlab/matrix_shared | 233c215503e6687ddaebf9197e9dfa1043e41eff | [
"MIT"
] | null | null | null | 36.620275 | 483 | 0.41219 | [
[
[
"# Overview:\n\nIn order to deal with accessing and storing the mounds of data associated with the matrix project, I have written a script called matrix_manager. The main workhorse of this script is custom class called 'Database' that uses the 'shelve' package (https://docs.python.org/3.4/library/shelve.html). There is also an accessory function called filter_data in this script that I use a lot.\n\nThe purpose of the 'Database' class is to store relevant information related to the project (locations of coolers, location of analysis, location of dot calls), as well as to provide a set of methods to easily access the various type of features we extract from Hi-C maps (scalings, eigvectors, pileups etc)\n\nThis notebook is meant to be a tutorial to show you how to create and use these Database object. All my other scripts rely on this class to access the data that I'm analyzing. ",
"_____no_output_____"
]
],
[
[
"import matrix_manager as mm\nimport shelve\n%matplotlib notebook",
"_____no_output_____"
]
],
[
[
"# Code to create matrix database",
"_____no_output_____"
],
[
"One can create an instance of the Databse object by giving it the path to where the database file are or will be stored. Since the database is being made for the first, all it's attributes are set to either None, '' or [ ].",
"_____no_output_____"
]
],
[
[
"imp.reload(mm)\ndb_path = '/net/levsha/share/sameer/U54/matrix_shared/sameer/metadata/U54_matrix_info'\ndb = mm.Database(db_path)\nprint(db.metadata, db.keys, db.analysis_path, db.cooler_paths, db.dot_paths)",
"None [] \n"
]
],
[
[
"Now I will create the database for the matrix project. For this I will need to feed the Database object 4 things:\n\n1) A list of paths the point to where the coolers are located.\n\n2) The path to the base directory where all the analysis will be stored\n\n3) A list of paths for the dot calls\n\n4) A DataFrame that contains the metadata about the library. This table must contain 6 columns: lib_name, celltyep, xlink, enzyme, cycle, seq. \n\n a) The 'lib_name' column contains the name of the library upto the first '.' in it's name. So U54-ESC-DSG-DdeI-20161014-R1-T1__hg38.hg38.mapq_30.1000.mcool becomes U54-ESC-DSG-DdeI-20161014-R1-T1__hg38\n \n b) 'celltype', 'xlink' and 'enzyme' should be obvious.\n \n c) 'cycle' column represents whether the library is synchronized or not. Most libraries will be classified as NS (non-synchronous) but the HelaS3 libraries will be split into NS, G1 and M\n \n d) 'seq' refers to if the library is a deeply sequenced library or not. This column can take 3 values - 'deep', 'control' or '-'. Libraries labelled 'deep' are deeply sequenced, while libraries called 'control' are not deeply sequenced but have the same ('celltype','xlink','enzyme') combination as a deep library. Libraries called '-' do not have a deep equivalent.",
"_____no_output_____"
]
],
[
[
"cooler_paths = ['/net/levsha/share/lab/U54/2019_mapping_hg38/U54_deep/cooler_library_group/',\n '/net/levsha/share/lab/U54/2019_mapping_hg38/U54_matrix/cooler_library/']\n\nanalysis_path = '/net/levsha/share/sameer/U54/hic_matrix/'\n\ndot_paths = ['/net/levsha/share/lab/U54/2019_mapping_hg38/U54_matrix/snakedots/',\n '/net/levsha/share/lab/U54/2019_mapping_hg38/U54_deep/snakedots/']",
"_____no_output_____"
],
[
"## The details of this cell are not important. I'm just creating the metadata table from the cooler names.\n\ndf_dict = defaultdict(list)\nfor path in cooler_paths:\n for file in os.listdir(path):\n lib_name = file.split('.')[0]\n df_dict['lib_name'].append(lib_name)\n \n if '-END' in lib_name:\n df_dict['celltype'].append('END')\n elif ('-ESC' in lib_name) or ('H1ESC' in lib_name):\n df_dict['celltype'].append('ESC')\n elif '-HFF' in lib_name:\n df_dict['celltype'].append('HFF')\n else:\n df_dict['celltype'].append('HelaS3')\n \n if '-DSG-' in lib_name:\n df_dict['xlink'].append('DSG')\n elif '-EGS-' in lib_name:\n df_dict['xlink'].append('EGS')\n else:\n df_dict['xlink'].append('FA')\n \n if '-MNase-' in lib_name:\n df_dict['enzyme'].append('MNase')\n elif '-DdeI-DpnII-' in lib_name:\n df_dict['enzyme'].append('double')\n elif '-DdeI-' in lib_name:\n df_dict['enzyme'].append('DdeI')\n elif '-DpnII-' in lib_name:\n df_dict['enzyme'].append('DpnII')\n else:\n df_dict['enzyme'].append('HindIII')\n \n if 'deep' in path:\n df_dict['seq'].append('deep')\n else:\n df_dict['seq'].append('-')\n \n if '-G1-' in lib_name:\n df_dict['cycle'].append('G1')\n elif '-M-' in lib_name:\n df_dict['cycle'].append('M')\n else:\n df_dict['cycle'].append('NS')\n \ndf = pd.DataFrame(df_dict).sort_values(['celltype','xlink','enzyme','cycle','seq']).reset_index(drop=True)\ndf = df.drop([4, 5, 21, 22, 39, 40, 42]) \nmetadata = df[['lib_name','seq','celltype','xlink','enzyme','cycle']].reset_index(drop=True)\n\ndeep_indices = metadata.loc[(metadata['seq']=='deep') & (metadata['enzyme'] != 'double')].index.values\nmetadata.loc[deep_indices-1, 'seq'] = 'control'\nmetadata",
"_____no_output_____"
]
],
[
[
"Once we create the dataset, we see that most of the attributes of the object as now filled. Note: If you try to use the create_dataset method once you have already created the shelve object, it will raise an error.",
"_____no_output_____"
]
],
[
[
"db.create_dataset(metadata, cooler_paths, analysis_path, dot_paths)\ndisplay(db.metadata)\nprint(db.keys, db.analysis_path, db.cooler_paths, db.dot_paths)",
"_____no_output_____"
]
],
[
[
"## Accessing and modifying an already existing database\n\nNow that we've created the database, you can access it by initializing the object with the right database_path.",
"_____no_output_____"
]
],
[
[
"imp.reload(mm)\ndb = mm.Database(db_path)\ndisplay(db.metadata)",
"_____no_output_____"
],
[
"# I can also alternative access the metadata using the get_tables() method.\ndisplay(db.get_tables())",
"_____no_output_____"
]
],
[
[
"### Adding to database\n\nFor each feature of Hi-C (pileups for example), I like to create various metrics that quantify that feature (dot enrichment score for example) and store these away permanently. I can do this by using the add_table method. The add table method takes in a DataFrame. However this data __must__ have a column named 'lib_name' that has identical entries to the 'lib_name' column in db.metadata",
"_____no_output_____"
]
],
[
[
"df = db.get_tables()\ndf = df[['lib_name']].copy()\ndf.loc[:, 'dummy'] = 1\ndf",
"_____no_output_____"
],
[
"db.add_table('dummy', df)",
"_____no_output_____"
]
],
[
[
"Now even if I reinitialize the object, it will retreive the 'dummy' table in addition to the metadata",
"_____no_output_____"
]
],
[
[
"db = mm.Database(db_path)\nprint(db.keys)\ndb.get_tables('dummy') # I can give this function a any list of keys that I know the database contains. \n # It will append these tables to the metadata table and return it",
"['dummy']\n"
]
],
[
[
"### Modifying the database\nModifying an existing table is done using the modify_table() method.",
"_____no_output_____"
]
],
[
[
"df['dummy'] = np.nan\ndb.modify_table('dummy', df)",
"_____no_output_____"
],
[
"db.get_tables('dummy')",
"_____no_output_____"
]
],
[
[
"### Removing from the database\nRemoving an existing table is done using the remove_table() method.",
"_____no_output_____"
]
],
[
[
"db.remove_table('dummy')\ndb.keys",
"_____no_output_____"
]
],
[
[
"## Accessing coolers from the database\n\nI've created this database to allow easy access to the various data files associated with the matrix project. I've created methods for retrieving coolers, scalings, eigenvectors, pileups and insulation tracks. Here I will show you how to access cooler files. Storing the cooler objects in the dataframe allows me to easily iterate through the dataframe and apply my operation sequentially.",
"_____no_output_____"
]
],
[
[
"table = db.get_tables()\ntable = db.get_coolers(table, res=100000)\ntable",
"_____no_output_____"
]
],
[
[
" You may be wondering why I chose to feed in the metadata table to to get_coolers() method, we the database object already has access to the metadata. The reason for this is that I can now chain several get_coolers() methods together as shown below. \n \n I use this methodology regularly for analysis that requires multiple types of data as input. For example, for making saddleplots, I would need coolers, expected curves and eigenvectors. Using this, I can easily pipe the result of get_coolers() into the get_scalings() method and further pipe the output of that into the get_eigendecomps() method. This allows me to access and keep track of all the required data to create saddleplots for the entire matrix project in one shot.",
"_____no_output_____"
]
],
[
[
"table = db.get_tables()\ntable = db.get_coolers(table, res=100000)\ndisplay(table.head())\ntable = db.get_coolers(table, res=1000)\ndisplay(table.head())",
"_____no_output_____"
]
],
[
[
"I've tried to make the code as flexible as possible but there are some bottlenecks. For example, P(s) curves are expected to be stored in hdf5 formats because it allows me to store P(s) as well as average trans interactions in the same location. Similiarly, similarly eigenvectors and eigenvalues are stored together in an hdf5 file. Pileups are expected to be stored in the .npy format and insulation tracks are .txt file with '\\t' separation.\n\nThe my various notebooks should be able to show you how I use this database class to do all the analysis I've done",
"_____no_output_____"
],
[
"The last function that will be used often is the filter_data function. This is **NOT** a method of the Database class. It is used to filter the table using values of the metadata. For example, I will show you, how I filter for libraries that are either 'HFF' or 'ESC' but only 'DSG'",
"_____no_output_____"
]
],
[
[
"mm.filter_data(table, filter_dict={'celltype':['ESC','HFF'],'xlink':'DSG'})",
"_____no_output_____"
]
],
[
[
"**NOTE:**\n\n**The version of the Database class that I was using uptil now had certain things like cooler_paths etc hardcoded into the script. I modified the scripts so that others can use the Database class by just allowing those paths to fed in as variable in the create_dataset() method.**\n\n**In the process, I also modified the layout a bit. The get_coolers, get_scalings functions were earlier independent function but are now methods of the Database object. Also I cleaned up the code and changed the namespace a bit here and there**\n\n**All this is to say that the current version of the Database code may not work with the workflow in the other notebooks. If you encounter a notebook where this is the case, either try to modify it yourself or let me know**",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e79d95207eddb1c7554add215db4a3edf09bde05 | 33,477 | ipynb | Jupyter Notebook | R/09 Self join.ipynb | madlogos/sqlzoo | 6f4a4c7905e245fda40f0d31e0e756805b5ae063 | [
"MIT"
] | null | null | null | R/09 Self join.ipynb | madlogos/sqlzoo | 6f4a4c7905e245fda40f0d31e0e756805b5ae063 | [
"MIT"
] | null | null | null | R/09 Self join.ipynb | madlogos/sqlzoo | 6f4a4c7905e245fda40f0d31e0e756805b5ae063 | [
"MIT"
] | null | null | null | 33.986802 | 330 | 0.422021 | [
[
[
"# Self join\n\n## Edinburgh Buses\n[Details of the database](https://sqlzoo.net/wiki/Edinburgh_Buses.) Looking at the data\n\n```\nstops(id, name)\nroute(num, company, pos, stop)\n```",
"_____no_output_____"
]
],
[
[
"library(tidyverse)\nlibrary(DBI)\nlibrary(getPass)\ndrv <- switch(Sys.info()['sysname'],\n Windows=\"PostgreSQL Unicode(x64)\",\n Darwin=\"/usr/local/lib/psqlodbcw.so\",\n Linux=\"PostgreSQL\")\ncon <- dbConnect(\n odbc::odbc(),\n driver = drv,\n Server = \"localhost\",\n Database = \"sqlzoo\",\n UID = \"postgres\",\n PWD = getPass(\"Password?\"),\n Port = 5432\n)\noptions(repr.matrix.max.rows=20)",
"-- \u001b[1mAttaching packages\u001b[22m --------------------------------------- tidyverse 1.3.0 --\n\n\u001b[32mv\u001b[39m \u001b[34mggplot2\u001b[39m 3.3.0 \u001b[32mv\u001b[39m \u001b[34mpurrr \u001b[39m 0.3.4\n\u001b[32mv\u001b[39m \u001b[34mtibble \u001b[39m 3.0.1 \u001b[32mv\u001b[39m \u001b[34mdplyr \u001b[39m 0.8.5\n\u001b[32mv\u001b[39m \u001b[34mtidyr \u001b[39m 1.0.2 \u001b[32mv\u001b[39m \u001b[34mstringr\u001b[39m 1.4.0\n\u001b[32mv\u001b[39m \u001b[34mreadr \u001b[39m 1.3.1 \u001b[32mv\u001b[39m \u001b[34mforcats\u001b[39m 0.5.0\n\n-- \u001b[1mConflicts\u001b[22m ------------------------------------------ tidyverse_conflicts() --\n\u001b[31mx\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mfilter()\u001b[39m masks \u001b[34mstats\u001b[39m::filter()\n\u001b[31mx\u001b[39m \u001b[34mdplyr\u001b[39m::\u001b[32mlag()\u001b[39m masks \u001b[34mstats\u001b[39m::lag()\n\n"
]
],
[
[
"## 1.\nHow many **stops** are in the database.",
"_____no_output_____"
]
],
[
[
"stops <- dbReadTable(con, 'stops')\nroute <- dbReadTable(con, 'route')",
"_____no_output_____"
],
[
"stops %>% tally",
"_____no_output_____"
]
],
[
[
"## 2.\nFind the **id** value for the stop 'Craiglockhart'",
"_____no_output_____"
]
],
[
[
"stops %>% \n filter(name=='Craiglockhart') %>% \n select(id)",
"_____no_output_____"
]
],
[
[
"## 3.\nGive the **id** and the **name** for the **stops** on the '4' 'LRT' service.",
"_____no_output_____"
]
],
[
[
"stops %>% \n inner_join(route, by=c(id=\"stop\")) %>%\n filter(num=='4' & company=='LRT') %>%\n select(id, name)",
"_____no_output_____"
]
],
[
[
"## 4. Routes and stops\n\nThe query shown gives the number of routes that visit either London Road (149) or Craiglockhart (53). Run the query and notice the two services that link these stops have a count of 2. Add a HAVING clause to restrict the output to these two routes.",
"_____no_output_____"
]
],
[
[
"route %>% \n filter(stop==149 | stop==53) %>%\n group_by(company, num) %>% \n summarise(n_route=n()) %>%\n filter(n_route==2)",
"_____no_output_____"
]
],
[
[
"## 5.\nExecute the self join shown and observe that b.stop gives all the places you can get to from Craiglockhart, without changing routes. Change the query so that it shows the services from Craiglockhart to London Road.",
"_____no_output_____"
]
],
[
[
"route %>% \n inner_join(route, by=c(company=\"company\", num=\"num\")) %>%\n filter(stop.x==53 & stop.y==149) %>%\n select(company, num, stop.x, stop.y)",
"_____no_output_____"
]
],
[
[
"## 6.\nThe query shown is similar to the previous one, however by joining two copies of the **stops** table we can refer to **stops** by **name** rather than by number. Change the query so that the services between 'Craiglockhart' and 'London Road' are shown. If you are tired of these places try 'Fairmilehead' against 'Tollcross'",
"_____no_output_____"
]
],
[
[
"route %>% \n inner_join(stops, by=c(stop=\"id\")) %>% \n inner_join(route %>%\n inner_join(stops, by=c(stop=\"id\")),\n by=c(company=\"company\", num=\"num\")\n ) %>%\n filter(name.x=='Craiglockhart' &\n name.y=='London Road') %>%\n select(company, num, name.x, name.y)",
"_____no_output_____"
]
],
[
[
"## 7. [Using a self join](https://sqlzoo.net/wiki/Using_a_self_join)\n\nGive a list of all the services which connect stops 115 and 137 ('Haymarket' and 'Leith')",
"_____no_output_____"
]
],
[
[
"route %>% \n inner_join(route, by=c(company=\"company\", num=\"num\")) %>%\n filter(stop.x==115 & stop.y==137) %>%\n distinct(company, num)",
"_____no_output_____"
]
],
[
[
"## 8.\nGive a list of the services which connect the stops 'Craiglockhart' and 'Tollcross'",
"_____no_output_____"
]
],
[
[
"route %>% \n inner_join(stops, by=c(stop=\"id\")) %>% \n inner_join(route %>%\n inner_join(stops, by=c(stop=\"id\")),\n by=c(company=\"company\", num=\"num\")\n ) %>%\n filter(name.x=='Craiglockhart' & \n name.y=='Tollcross') %>%\n distinct(company, num)",
"_____no_output_____"
]
],
[
[
"## 9.\nGive a distinct list of the **stops** which may be reached from 'Craiglockhart' by taking one bus, including 'Craiglockhart' itself, offered by the LRT company. Include the company and bus no. of the relevant services.",
"_____no_output_____"
]
],
[
[
"route %>% \n inner_join(stops, by=c(stop=\"id\")) %>% \n inner_join(route %>%\n inner_join(stops, by=c(stop=\"id\")),\n by=c(company=\"company\", num=\"num\")\n ) %>%\n filter(name.x=='Craiglockhart' &\n company=='LRT') %>%\n distinct(name.y, company, num)",
"_____no_output_____"
]
],
[
[
"## 10.\nFind the routes involving two buses that can go from **Craiglockhart** to **Lochend**.\nShow the bus no. and company for the first bus, the name of the stop for the transfer,\nand the bus no. and company for the second bus.\n\n> _Hint_ \n> Self-join twice to find buses that visit Craiglockhart and Lochend, then join those on matching stops.",
"_____no_output_____"
]
],
[
[
"bus1 <- route %>%\n inner_join(stops, by=c(stop=\"id\")) %>% \n inner_join(route %>%\n inner_join(stops, by=c(stop=\"id\")),\n by=c(company=\"company\", num=\"num\")\n ) %>%\n filter(name.x=='Craiglockhart')\nbus2 <- route %>%\n inner_join(stops, by=c(stop=\"id\")) %>% \n inner_join(route %>%\n inner_join(stops, by=c(stop=\"id\")),\n by=c(company=\"company\", num=\"num\")\n ) %>%\n filter(name.y=='Lochend')\nbus1 %>% \n inner_join(bus2, by=c(stop.y=\"stop.x\")) %>%\n select(num.x, company.x, name.y.x, num.y, company.y) %>%\n `names<-`(c('num1', 'company1', 'transfer', 'num2', 'company2'))",
"_____no_output_____"
],
[
"dbDisconnect(con)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e79d97679fea1ac60351bd88f78312e9deb98ce3 | 21,927 | ipynb | Jupyter Notebook | Veera/linear_discriminant_analysis.ipynb | kaustubhcs/FaceRec-ADAM | f30c0557e48cddb77d13d32fa6a6c2f00c0ce846 | [
"MIT"
] | null | null | null | Veera/linear_discriminant_analysis.ipynb | kaustubhcs/FaceRec-ADAM | f30c0557e48cddb77d13d32fa6a6c2f00c0ce846 | [
"MIT"
] | 6 | 2018-04-10T20:24:35.000Z | 2018-04-21T03:58:18.000Z | Veera/linear_discriminant_analysis.ipynb | kaustubhcs/FaceRec-ADAM | f30c0557e48cddb77d13d32fa6a6c2f00c0ce846 | [
"MIT"
] | null | null | null | 28.075544 | 93 | 0.518949 | [
[
[
"# Loading and displaying an image\n# from PIL import Image\n# img = Image.open('/home/dvveera/db/orl/s1/1.pgm')\n# print(img.format, img.size, img.mode)\n# display(img)",
"_____no_output_____"
],
[
"# Load the dataset\nimport load_dataset\nimport numpy as np\nimport math\nfrom PIL import Image\n\ndir_src = '/home/dvveera/dvveera_hdd2tb/orl'\ndata_label = load_dataset.load_data_label(dir_src)\n\n# Training, validation and test data with labels\n# data_label = list(train, valid, test, mean_train)\ntrain_storage, train_labels = zip(*data_label[0])\nvalid_storage, valid_labels = zip(*data_label[1])\ntest_storage, test_labels = zip(*data_label[2])\nmean_storage = data_label[3]",
"Assigned Labels to Training, Validation and Test Data\nTrain data: 20/240\nTrain data: 40/240\nTrain data: 60/240\nTrain data: 80/240\nTrain data: 100/240\nTrain data: 120/240\nTrain data: 140/240\nTrain data: 160/240\nTrain data: 180/240\nTrain data: 200/240\nTrain data: 220/240\nValid data: 20/80\nValid data: 40/80\nValid data: 60/80\nTest data: 20/80\nTest data: 40/80\nTest data: 60/80\n"
],
[
"# Sample set and labels\n# import numpy as np\n# from PIL import Image\n# train_storage = []\n# train_storage.append(np.array([[196, 35, 234], [232, 59, 244], [243, 57, 226]]))\n# train_storage.append(np.array([[188, 15, 236], [244, 44, 228], [251, 48, 230]]))\n# train_storage.append(np.array([[246, 48, 222], [225, 40, 226], [208, 35, 234]]))\n# train_storage.append(np.array([[208, 16, 235], [255, 44, 229], [236, 34, 247]]))\n# train_storage.append(np.array([[245, 21, 213], [254, 55, 252], [215, 51, 249]]))\n# train_storage.append(np.array([[248, 22, 225], [252, 30, 240], [242, 27, 244]]))\n# train_storage.append(np.array([[255, 223, 224], [255, 0, 255], [249, 255, 235]]))\n# train_storage.append(np.array([[234, 255, 205], [251, 0, 251], [238, 253, 240]]))\n# train_storage.append(np.array([[232, 255, 231], [247, 38, 246], [190, 236, 250]]))\n# train_storage.append(np.array([[255, 241, 208], [255, 28, 255], [194, 234, 188]]))\n# train_storage.append(np.array([[237, 243, 237], [237, 19, 251], [227, 225, 237]]))\n# train_storage.append(np.array([[224, 251, 215], [245, 31, 222], [233, 255, 254]]))\n# train_labels = [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1]\n# for i in train_storage:\n# dis1 = Image.fromarray(np.transpose(np.uint8(i)))\n# dis1 = dis1.resize((100, 100))\n# display(dis1)\n# print(train_storage)",
"_____no_output_____"
],
[
"%%time\n# Linear Discriminant Analysis\nfrom collections import Counter\n# Converting images to a one-dimensional vector\ntrain_1d = [t.flatten() for t in train_storage]\n# Calculate the mean of images in each class\nlabel_dict = Counter(train_labels)\nmean = {}\nfor idx, t in enumerate(train_1d):\n lab = train_labels[idx]\n if lab not in mean:\n mean[lab] = t / label_dict[lab]\n else:\n mean[lab] += t / label_dict[lab]\n# print('Calculated the means of the images in each class')\n# print(mean)",
"CPU times: user 5.82 ms, sys: 192 µs, total: 6.01 ms\nWall time: 5.87 ms\n"
],
[
"%%time\n# Calculate in class scatter matrix\nSw = []\nfor idx, t in enumerate(train_1d):\n lab = train_labels[idx]\n if idx == 0:\n Sw = np.outer(t - mean[lab], t - mean[lab])\n else:\n Sw += np.outer(t - mean[lab], t - mean[lab])\nprint('Calculated the in-class scatter matrix')\n# print(Sw)",
"Calculated the in-class scatter matrix\nCPU times: user 36.2 s, sys: 40.4 s, total: 1min 16s\nWall time: 1min 16s\n"
],
[
"%%time\n# Calculate between class scatter matrix\n# Calculate the mean of all images\nmean_all = np.sum(train_1d, axis = 0) / len(train_1d)\n# Calculate between class scatter matrix\nfor idx, m in enumerate(mean):\n if idx == 0:\n Sb = label_dict[m] * np.outer(mean[m] - mean_all, mean[m] - mean_all)\n else:\n Sb += label_dict[m] * np.outer(mean[m] - mean_all, mean[m] - mean_all)\nprint('Calculated the between-class scatter matrix')\n# print(Sb)",
"Calculated the between-class scatter matrix\nCPU times: user 9.26 s, sys: 12.8 s, total: 22.1 s\nWall time: 22.1 s\n"
],
[
"%%time\n# Compute the generalized eigen values and eigen vectors\nA = np.dot(np.linalg.inv(Sw), Sb)\neig_values, eig_vectors = np.linalg.eigh(A)\n# print(eig_values)\n# print(mean_all)\n# print(mean)\neig = list(zip(eig_values, np.transpose(eig_vectors)))",
"CPU times: user 5min 26s, sys: 1.98 s, total: 5min 28s\nWall time: 1min 21s\n"
],
[
"%%time\n# Sort eigenvalues and eigenvectors\neig = sorted(eig, key=lambda x: x[0], reverse = True)\nprint('Sorted eigen values and eigen vectors from high to low')\n# print(eig)\n# temp = eig_values\n# sorted(temp, reverse = True)\n# print(temp)",
"Sorted eigen values and eigen vectors from high to low\nCPU times: user 765 µs, sys: 113 µs, total: 878 µs\nWall time: 877 µs\n"
],
[
"%%time\n# Select the first C - 1 eigen vectors\neig_values, eig_vectors = zip(*eig)\nnew_eig = []\nfor idx, e in enumerate(eig):\n if idx < (len(label_dict) - 1):\n new_eig.append(e)\nprint('Selected the first C - 1 eigenvectors')\n# print(new_eig)\neig_values, eig_vectors = zip(*new_eig)",
"Selected the first C - 1 eigenvectors\nCPU times: user 14.2 ms, sys: 4.32 ms, total: 18.5 ms\nWall time: 18.4 ms\n"
],
[
"# # Select specified number of eigenvectors\n# n_eig = int(np.ceil(0.01 * len(new_eig)))\n# temp_values = []\n# temp_vectors = []\n# for i in range(n_eig):\n# temp_values.append(eig_values[i])\n# temp_vectors.append(eig_vectors[i])\n# eig_values, eig_vectors = temp_values, temp_vectors\n# print(len(eig_values))",
"_____no_output_____"
],
[
"# # Remove last 40% of eigenvectors\n# n_eig = int(np.ceil(0.6 * len(eig_values)))\n# temp_values = []\n# temp_vectors = []\n# for i in range(n_eig):\n# temp_values.append(eig_values[i])\n# temp_vectors.append(eig_vectors[i])\n# eig_values, eig_vectors = temp_values, temp_vectors\n# print(len(eig_values))",
"_____no_output_____"
],
[
"# # Select eigenvectors based on energy\n# threshold = 0.9\n# sum_eig = np.sum(eig_values)\n# temp_values = []\n# temp_vectors = []\n# sum_e = 0\n# for e in eig_values:\n# if (sum_e / sum_eig) > threshold:\n# break\n# else:\n# temp_values.append(e)\n# idx = eig_values.index(e)\n# temp_vectors.append(eig_vectors[idx])\n# sum_e += e\n# eig_values, eig_vectors = temp_values, temp_vectors\n# print(len(eig_values))",
"_____no_output_____"
],
[
"# # Select eigenvectors based on stretching\n# threshold = 0.01\n# largest_eig = eig_values[0]\n# temp_values = []\n# temp_vectors = []\n# for e in eig_values:\n# if (e / largest_eig) < threshold:\n# break\n# else:\n# temp_values.append(e)\n# idx = eig_values.index(e)\n# temp_vectors.append(eig_vectors[idx])\n# eig_values, eig_vectors = temp_values, temp_vectors\n# print(len(eig_values))",
"_____no_output_____"
],
[
"# # Remove the first eigenvector\n# temp_values = []\n# temp_vectors = []\n# first_eig = eig_values[0]\n# for e in eig_values:\n# if e == first_eig:\n# pass\n# else:\n# temp_values.append(e)\n# idx = eig_values.index(e)\n# temp_vectors.append(eig_vectors[idx])\n# eig_values, eig_vectors = temp_values, temp_vectors\n# print(len(eig_values))",
"_____no_output_____"
],
[
"%%time\n# Create projection matrix\n# eig_values, eig_vectors = zip(*new_eig)\nV = np.transpose(eig_vectors)",
"CPU times: user 4.94 ms, sys: 139 µs, total: 5.08 ms\nWall time: 4.51 ms\n"
],
[
"%%time\n# Project training images into subspace\ndata_matrix = np.transpose(train_1d)\nproj_data_matrix = np.dot(eig_vectors, data_matrix)",
"CPU times: user 14.3 ms, sys: 9.5 ms, total: 23.8 ms\nWall time: 14 ms\n"
],
[
"%%time\n# Identify test images\nvalid_1d = [t.flatten() for t in valid_storage]\n# Create data matrix\nvalid_data_matrix = np.transpose(valid_1d)\n# Project test images into the same subspace as training images\nproj_valid_matrix = np.dot(eig_vectors, valid_data_matrix)",
"CPU times: user 5.53 ms, sys: 4.23 ms, total: 9.76 ms\nWall time: 7.3 ms\n"
],
[
"%%time\n# Calculate L2 norms\ntr1 = np.transpose(proj_valid_matrix)\ntr2 = np.transpose(proj_data_matrix)\nl2 = []\nfor a in tr1:\n l1 = [np.linalg.norm(a - b) for b in tr2]\n l2.append(l1)\n# Find the index of the training image that matches closely with the test image\nidx = [l.index(min(l)) for l in l2]",
"CPU times: user 123 ms, sys: 17.8 ms, total: 141 ms\nWall time: 95.1 ms\n"
],
[
"# # Calculate L1 norms\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [np.sum(abs(a - b)) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# # Calculate covariance measure\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [ - np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b)) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# Function to calculate mahalanobis distance between two image vectors\ndef mah(x, y):\n m = 0\n for i in range(len(x)):\n m += x[i] * y[i] * 1 / np.sqrt(eig_values[i])\n return -m",
"_____no_output_____"
],
[
"# # Calculate mahalanobis\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [mah(a, b) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# Function to calculate correlation between two image vectors\ndef corr(x, y):\n mean_x = np.mean(x)\n mean_y = np.mean(y)\n std_x = np.std(x, ddof = 1)\n std_y = np.std(y, ddof = 1)\n m = 0\n for i in range(len(x)):\n m += (x[i] - mean_x) * (y[i] - mean_y) / (std_x * std_y)\n return m",
"_____no_output_____"
],
[
"# # Calculate correlation\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [corr(a, b) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(max(l)) for l in l2]",
"_____no_output_____"
],
[
"%%time\n# Find the label for the test images\nout = []\nfor i in idx:\n out.append(train_labels[i])\n# print(list(zip(out, idx)))\n# print(list(zip(valid_labels, idx)))",
"CPU times: user 11 µs, sys: 1e+03 ns, total: 12 µs\nWall time: 13.8 µs\n"
],
[
"%%time\n# Find the accuracy\ncorrect = 0\nfor i in range(len(out)):\n if out[i] == valid_labels[i]:\n correct += 1\naccuracy = correct * 100 / len(out)\nprint('Accuracy(%): ' + str(accuracy))",
"Accuracy(%): 96.25\nCPU times: user 325 µs, sys: 48 µs, total: 373 µs\nWall time: 238 µs\n"
],
[
"# Identify test images\ntest_1d = [t.flatten() for t in test_storage]\n# Create data matrix\ntest_data_matrix = np.transpose(test_1d)\n# Project test images into the same subspace as training images\nproj_test_matrix = np.dot(eig_vectors, test_data_matrix)",
"_____no_output_____"
],
[
"# Calculate L2 norms\ntr1 = np.transpose(proj_test_matrix)\ntr2 = np.transpose(proj_data_matrix)\nl2 = []\nfor a in tr1:\n l1 = [np.linalg.norm(a - b) for b in tr2]\n l2.append(l1)\n# Find the index of the training image that matches closely with the test image\nidx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# # Calculate L1 norms\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [np.sum(abs(a - b)) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# # Calculate covariance measure\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [ - np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b)) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# # Calculate mahalanobis\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [mah(a, b) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(min(l)) for l in l2]",
"_____no_output_____"
],
[
"# # Calculate correlation\n# tr1 = np.transpose(proj_valid_matrix)\n# tr2 = np.transpose(proj_data_matrix)\n# l2 = []\n# for a in tr1:\n# l1 = [corr(a, b) for b in tr2]\n# l2.append(l1)\n# # Find the index of the training image that matches closely with the test image\n# idx = [l.index(max(l)) for l in l2]",
"_____no_output_____"
],
[
"# Find the label for the test images\nout = []\nfor i in idx:\n out.append(train_labels[i])",
"_____no_output_____"
],
[
"# Find the accuracy\ncorrect = 0\nfor i in range(len(out)):\n if out[i] == test_labels[i]:\n correct += 1\naccuracy = correct * 100 / len(out)\nprint('Accuracy(%): ' + str(accuracy))",
"_____no_output_____"
],
[
"# # Print images (true / predicted)\n# j = 0\n# for i in range(len(out)):\n# dis1 = Image.fromarray(train_storage[idx[i]])\n# display(dis1)\n# print('Predicted:' + str(out[i]))\n# dis2 = Image.fromarray(test_storage[j])\n# display(dis2)\n# print('True:' + str(test_labels[j]))\n# print('-----------------------------------')\n# j += 1\n# # print(list(zip(out, idx)))\n# # print(list(zip(test_labels, idx)))",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79da098a03015c2594862b128f199d639d7d0e5 | 146,760 | ipynb | Jupyter Notebook | hausaTweet.ipynb | elsunais6167/hausaSentiment | c5ebceb16516e4504697094209eee202b047c48c | [
"MIT"
] | null | null | null | hausaTweet.ipynb | elsunais6167/hausaSentiment | c5ebceb16516e4504697094209eee202b047c48c | [
"MIT"
] | null | null | null | hausaTweet.ipynb | elsunais6167/hausaSentiment | c5ebceb16516e4504697094209eee202b047c48c | [
"MIT"
] | null | null | null | 192.346003 | 23,346 | 0.886563 | [
[
[
"#import libraries\n\nimport pandas as pd\nimport numpy as np\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#import Sckit-helper functions\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n\n#import Scikit-learn machine models\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import LogisticRegression\n\n#import performance metrics functions\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report",
"_____no_output_____"
],
[
"#importing dataset\n\ndata = pd.read_csv('sentiment_analysis_ouptut.csv')\ndata",
"_____no_output_____"
],
[
"#Categorizing sentiments \n#(-1 downward as negative sentiment category[0])\n#(+1 upward as positive sentiment category [1])\n\ndata.loc[(data['sentiment'] <0 ), 'sentiment'] = 0\ndata.loc[(data['sentiment'] >0), 'sentiment'] = 1\n\nj = 0\nfor i in range(0,1):\n data.loc[(data[\"sentiment\"] >= j) & (data[\"sentiment\"] <= i*10), \"sentiment\"] = i*10\n i = i + 1\n j = j + 10\n\ndata",
"_____no_output_____"
],
[
"#data split into training and testing datastet\n\ntest_percentage = .2\ntrain_df, test_df = train_test_split(data, test_size=test_percentage, random_state=42)\n\nlabels = train_df['sentiment']\ntest_labels = test_df['sentiment']\n\nprint(\"\\n### Split Complete ###\\n\")",
"\n### Split Complete ###\n\n"
],
[
"# Print counts of each class\nprint(\"- Counting Splits -\")\nprint(\"Training Samples:\", len(train_df))\nprint(\"Testing Samples:\", len(test_df))\n\n# Graph counts of each class, for both training and testing\n\ncount_train_classes = pd.value_counts(train_df['sentiment'])\ncount_train_classes.plot(kind='bar', fontsize=16)\nplt.title(\"Sentiment Count (Training)\", fontsize=20)\nplt.xticks(rotation='horizontal')\nplt.xlabel(\"Sentiment\", fontsize=20)\nplt.ylabel(\"Sentiment Count\", fontsize=20)\n\nplt.show()\n\ncount_test_classes = pd.value_counts(test_df['sentiment'])\ncount_test_classes.plot(kind='bar', fontsize=16, colormap='ocean')\nplt.title(\"Sentiment Count (Testing)\", fontsize=20)\nplt.xticks(rotation='horizontal')\nplt.xlabel(\"Sentiment\", fontsize=20)\nplt.ylabel(\"Sentiment Count\", fontsize=20)\n\nplt.show()",
"- Counting Splits -\nTraining Samples: 2359\nTesting Samples: 590\n"
],
[
"# Vectorizer the training inputs -- Takes about 30 seconds to complete\n# There are two types of vectors:\n# 1. Count vectorizer\n# 2. Term Frequency-Inverse Document Frequency (TF-IDF)\n\n\n\nprint(\"- Training Count Vectorizer -\")\ncVec = CountVectorizer()\ncount_X = cVec.fit_transform(train_df['tweet_text'])\n\nprint(\"- Training TF-IDF Vectorizer -\")\ntVec = TfidfVectorizer()\ntfidf_X = tVec.fit_transform(train_df['tweet_text'])\n\n\nprint(\"\\n### Vectorizing Complete ###\\n\")",
"- Training Count Vectorizer -\n- Training TF-IDF Vectorizer -\n\n### Vectorizing Complete ###\n\n"
],
[
"# Vectorize the testing inputs\n# Use 'transform' instead of 'fit_transform' because we've already trained our vectorizers\n\nprint(\"- Count Vectorizer -\")\ntest_count_X = cVec.transform(test_df['tweet_text'])\n\nprint(\"- TFIDF Vectorizer -\")\ntest_tfidf_X = tVec.transform(test_df['tweet_text'])\n\n\nprint(\"\\n### Vectorizing Complete ###\\n\")",
"- Count Vectorizer -\n- TFIDF Vectorizer -\n\n### Vectorizing Complete ###\n\n"
],
[
"def generate_report(cmatrix, score, creport):\n \"\"\"Generates and displays graphical reports\n Keyword arguments:\n cmatrix - Confusion matrix generated by the model\n score --- Score generated by the model\n creport - Classification Report generated by the model\n \n :Returns -- N/A\n \"\"\"\n \n # Transform cmatrix because Sklearn has pred as columns and actual as rows.\n cmatrix = cmatrix.T\n \n # Generate confusion matrix heatmap\n plt.figure(figsize=(5,5))\n sns.heatmap(cmatrix, \n annot=True, \n fmt=\"d\", \n linewidths=.5, \n square = True, \n cmap = 'Blues', \n annot_kws={\"size\": 16}, \n xticklabels=['negative', 'positive'],\n yticklabels=['negative', 'positive'])\n\n plt.xticks(rotation='horizontal', fontsize=16)\n plt.yticks(rotation='horizontal', fontsize=16)\n plt.xlabel('Actual Label', size=20);\n plt.ylabel('Predicted Label', size=20);\n\n title = 'Accuracy Score: {0:.4f}'.format(score)\n plt.title(title, size = 20);\n\n # Display classification report and confusion matrix\n print(creport)\n plt.show()\n \n\nprint(\"\\n### Report Generator Defined ###\\n\")",
"\n### Report Generator Defined ###\n\n"
],
[
"# Multinomial Naive Bayesian with TF-IDF\n\n# Train the model\nmnb_tfidf = MultinomialNB()\nmnb_tfidf.fit(tfidf_X, labels)\n\n\n# Test the mode (score, predictions, confusion matrix, classification report)\nscore_mnb_tfidf = mnb_tfidf.score(test_tfidf_X, test_labels)\npredictions_mnb_tfidf = mnb_tfidf.predict(test_tfidf_X)\ncmatrix_mnb_tfidf = confusion_matrix(test_labels, predictions_mnb_tfidf)\ncreport_mnb_tfidf = classification_report(test_labels, predictions_mnb_tfidf)\n\nprint(\"\\n### Model Built ###\\n\")\ngenerate_report(cmatrix_mnb_tfidf, score_mnb_tfidf, creport_mnb_tfidf)",
"\n### Model Built ###\n\n precision recall f1-score support\n\n 0 0.77 0.87 0.82 306\n 1 0.84 0.72 0.77 284\n\n accuracy 0.80 590\n macro avg 0.80 0.80 0.80 590\nweighted avg 0.80 0.80 0.80 590\n\n"
],
[
"# Multinomial Naive Bayesian with Count Vectorizer\n\n# Train the model\nmnb_count = MultinomialNB()\nmnb_count.fit(count_X, labels)\n\n\n# Test the mode (score, predictions, confusion matrix, classification report)\nscore_mnb_count = mnb_count.score(test_count_X, test_labels)\npredictions_mnb_count = mnb_count.predict(test_count_X)\ncmatrix_mnb_count = confusion_matrix(test_labels, predictions_mnb_count)\ncreport_mnb_count = classification_report(test_labels, predictions_mnb_count)\n\nprint(\"\\n### Model Built ###\\n\")\ngenerate_report(cmatrix_mnb_count, score_mnb_count, creport_mnb_count)",
"\n### Model Built ###\n\n precision recall f1-score support\n\n 0 0.84 0.76 0.80 306\n 1 0.77 0.84 0.80 284\n\n accuracy 0.80 590\n macro avg 0.80 0.80 0.80 590\nweighted avg 0.80 0.80 0.80 590\n\n"
],
[
"# Logistic Regression with TF-IDF\n\n# Train the model\nlgs_tfidf = LogisticRegression(solver='lbfgs')\nlgs_tfidf.fit(tfidf_X, labels)\n\n\n# Test the mode (score, predictions, confusion matrix, classification report)\nscore_lgs_tfidf = lgs_tfidf.score(test_tfidf_X, test_labels)\npredictions_lgs_tfidf = lgs_tfidf.predict(test_tfidf_X)\ncmatrix_lgs_tfidf = confusion_matrix(test_labels, predictions_lgs_tfidf)\ncreport_lgs_tfidf = classification_report(test_labels, predictions_lgs_tfidf)\n\nprint(\"\\n### Model Built ###\\n\")\ngenerate_report(cmatrix_lgs_tfidf, score_lgs_tfidf, creport_lgs_tfidf)",
"\n### Model Built ###\n\n precision recall f1-score support\n\n 0 0.79 0.93 0.85 306\n 1 0.90 0.73 0.81 284\n\n accuracy 0.83 590\n macro avg 0.85 0.83 0.83 590\nweighted avg 0.84 0.83 0.83 590\n\n"
],
[
"# Logistic Regression with Count Vectorizer\n\n# Train the model\nlgs_count = LogisticRegression(solver='lbfgs')\nlgs_count.fit(count_X, labels)\n\n\n# Test the mode (score, predictions, confusion matrix, classification report)\nscore_lgs_count = lgs_count.score(test_count_X, test_labels)\npredictions_lgs_count = lgs_count.predict(test_count_X)\ncmatrix_lgs_count = confusion_matrix(test_labels, predictions_lgs_count)\ncreport_lgs_count = classification_report(test_labels, predictions_lgs_count)\n\nprint(\"\\n### Model Built ###\\n\")\ngenerate_report(cmatrix_lgs_count, score_lgs_count, creport_lgs_count)",
"\n### Model Built ###\n\n precision recall f1-score support\n\n 0 0.83 0.93 0.88 306\n 1 0.91 0.80 0.85 284\n\n accuracy 0.86 590\n macro avg 0.87 0.86 0.86 590\nweighted avg 0.87 0.86 0.86 590\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e79dad14ef63cec75be6a3e71e9c0817775319d3 | 35,200 | ipynb | Jupyter Notebook | soluciones/ce.rueda12/tarea4/solucion.ipynb | SamuelCanas/FISI2028-202120 | 719daa807c626b77519993bfe5440b5ac7bdfb37 | [
"MIT"
] | 3 | 2021-08-17T19:19:11.000Z | 2021-11-08T12:26:41.000Z | soluciones/ce.rueda12/tarea4/solucion.ipynb | SamuelCanas/FISI2028-202120 | 719daa807c626b77519993bfe5440b5ac7bdfb37 | [
"MIT"
] | 12 | 2021-09-18T01:33:58.000Z | 2021-10-16T00:11:45.000Z | soluciones/ce.rueda12/tarea4/solucion.ipynb | SamuelCanas/FISI2028-202120 | 719daa807c626b77519993bfe5440b5ac7bdfb37 | [
"MIT"
] | 28 | 2021-09-17T22:38:23.000Z | 2021-10-02T19:59:49.000Z | 72.131148 | 13,544 | 0.718608 | [
[
[
"import sympy \nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport sklearn as sl\nimport numpy as np\nimport seaborn as sns; sns.set()\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\n%matplotlib inline\nfrom scipy.integrate import quad\nfrom scipy import integrate\nfrom sympy import fft\nfrom scipy.fftpack import fft",
"_____no_output_____"
]
],
[
[
"# Tarea 4\n\nCon base a los métodos vistos en clase resuelva las siguientes dos preguntas",
"_____no_output_____"
],
[
"## (A) Integrales\n\n* $\\int_{0}^{1}x^{-1/2}\\,\\text{d}x$\n* $\\int_{0}^{\\infty}e^{-x}\\ln{x}\\,\\text{d}x$\n* $\\int_{0}^{\\infty}\\frac{\\sin{x}}{x}\\,\\text{d}x$",
"_____no_output_____"
]
],
[
[
"def f(x):\n return x**(-0.5)\n\nn=1000000\n\ndef Integrando1(f):\n x,y = np.linspace(0,1, num = n +1, retstep = True)\n return (5/4)*y*f(x[0] + f(x[-1])) + y*np.sum(f(x[1:-1]))\n\nIntegrando1(f)\n",
"_____no_output_____"
],
[
"def f(x):\n return math.exp(-x)\n \ndef trapecio2(f, n, a, b):\n h = (b - a) / float(n)\n integrando = 0.5 * h * (f(a) + f(b))\n for i in range(1, int(n)):\n integrando = integrando + h * f(a + i * h)\n return integrando\n \na = 0\nb = 10\nn = 100\n \nwhile(abs(trapecio2(f, n, a, b) - trapecio2(f, n * 4, a * 2, b * 2)) > 1e-6):\n n *= 4\n a *= 2\n b *= 2\n\ntrapecio2(f, n, a, b)\n",
"_____no_output_____"
],
[
"def Integrando2(x):\n funcion = np.exp(-x)\n return funcion \nsolucion2 = quad(Integrando2,0,np.inf)\n\nsolucion2",
"_____no_output_____"
],
[
"Integrando3 = integrate.quad(lambda x : (np.sin(x))/x, 0, np.inf)[0]\nprint(\"valor exacto de la integral 3:\", Integrando3)",
"valor exacto de la integral 3: 2.247867963468919\n"
],
[
"n=100 \nx = np.linspace(0.000001,n,1000001)\n\nf = []\nfor i in range(len(x)):\n f.append(np.sin(x[i])/x[i])\n \nf = np.array(f)\n\ndef integrate(in_x,ft_f)->float:\n \n calculo=0\n for i in range(len(x)-1):\n calculo = calculo + ((ft_f[i+1])+(ft_f[i]))*abs(in_x[i+1]-in_x[i])/2\n \n return(calculo)\n\nintegral_3 = integrate(x,f)\n\nprint(f\" Integrando 3 {integral_3}\")\n",
" Integrando 3 1.5622244668962069\n"
]
],
[
[
"## (B) Fourier\n\nCalcule la transformada rápida de Fourier para la función de la **Tarea 3 (D)** en el intervalo $[0,4]$ ($k$ máximo $2\\pi n/L$ para $n=25$). Ajuste la transformada de Fourier para los datos de la **Tarea 3** usando el método de regresión exacto de la **Tarea 3 (C)** y compare con el anterior resultado. Para ambos ejercicios haga una interpolación y grafique para comparar.",
"_____no_output_____"
]
],
[
[
"df = pd.read_pickle(r\"C:\\Users\\Camilo Rueda\\Downloads\\ex1.gz\")\nsns.scatterplot(x='x',y='y',data=df)\nplt.show()\ndf",
"_____no_output_____"
],
[
"x = df[\"x\"]\ny = df[\"y\"]\n\n\nlx = []\nly = []\nfor i in range(len(x)):\n if x[i]<=1.5 :\n lx.append(x[i])\n ly.append(y[i])\n \n \nx = np.array(lx)\ny = np.array(ly)\n\ndef f(p,x): \n return (p[0])/((x-p[1])**2 + p[2])**p[3]\n\ndef L_ajuste(p,x,y): \n deltaY=f(p,x) - y\n return np.dot(deltaY,deltaY)/len(y)\n\nNf = 25\n\ndef a_j(j):\n global x, y\n k_j = 2*np.pi*j/4\n n_y = y*np.cos(k_j*x)\n return integrate.simpson(n_y, x)\n\ndef b_j(j):\n global x, y\n k_j = 2*np.pi*j/4\n n_y = y*np.sin(k_j*x)\n return integrate.simpson(n_y, x)\n \n \n\n \nA_j = np.array([a_j(j) for j in range(Nf)])\nB_j = np.array([b_j(j) for j in range(Nf)])\n",
"_____no_output_____"
],
[
"x_tilde = np.linspace(0, 4, 10000) \nk_j = np. array([2*np.pi*j/4 for j in range(Nf)])\ny_tilde = np.sum([ (A_j[j]*np.cos(k_j[j]*x_tilde) + B_j[j]*np.sin(k_j[j]*x_tilde)) for j in range(Nf) ], axis=0)\n",
"_____no_output_____"
],
[
"plt.plot(x,y)\nplt.plot(x_tilde, y_tilde)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e79db8c5f0c95099997a19edaaaaa0d1ee9ae1ac | 22,710 | ipynb | Jupyter Notebook | gateway-lesson/gateway/gateway-6.ipynb | mohsen-gis/test2 | c94a006a83cd834164b74652400ed4c53c1779f2 | [
"BSD-3-Clause"
] | null | null | null | gateway-lesson/gateway/gateway-6.ipynb | mohsen-gis/test2 | c94a006a83cd834164b74652400ed4c53c1779f2 | [
"BSD-3-Clause"
] | null | null | null | gateway-lesson/gateway/gateway-6.ipynb | mohsen-gis/test2 | c94a006a83cd834164b74652400ed4c53c1779f2 | [
"BSD-3-Clause"
] | null | null | null | 27.427536 | 396 | 0.552928 | [
[
[
"# Cyber Literacy in the World of Cyberinfrastructure\n\nHere you will learn about Cyber Literacy for GIScience. ",
"_____no_output_____"
]
],
[
[
"# This code cell starts the necessary setup for Hour of CI lesson notebooks.\n# First, it enables users to hide and unhide code by producing a 'Toggle raw code' button below.\n# Second, it imports the hourofci package, which is necessary for lessons and interactive Jupyter Widgets.\n# Third, it helps hide/control other aspects of Jupyter Notebooks to improve the user experience\n# This is an initialization cell\n# It is not displayed because the Slide Type is 'Skip'\n\nfrom IPython.display import HTML, IFrame, Javascript, display\nfrom ipywidgets import interactive\nimport ipywidgets as widgets\nfrom ipywidgets import Layout\n\nimport getpass # This library allows us to get the username (User agent string)\n\n# import package for hourofci project\nimport sys\nsys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook)\nimport hourofci\n\n# load javascript to initialize/hide cells, get user agent string, and hide output indicator\n# hide code by introducing a toggle button \"Toggle raw code\"\nHTML(''' \n <script type=\"text/javascript\" src=\\\"../../supplementary/js/custom.js\\\"></script>\n \n <style>\n .output_prompt{opacity:0;}\n </style>\n \n <input id=\"toggle_code\" type=\"button\" value=\"Toggle raw code\">\n''')",
"_____no_output_____"
]
],
[
[
"\n## Reminder\n\n<a href=\"#/slide-2-0\" class=\"navigate-right\" style=\"background-color:blue;color:white;padding:8px;margin:2px;font-weight:bold;\">Continue with the lesson</a>\n\n<font size=\"+1\">\n\nBy continuing with this lesson you are granting your permission to take part in this research study for the Hour of Cyberinfrastructure: Developing Cyber Literacy for GIScience project. In this study, you will be learning about cyberinfrastructure and related concepts using a web-based platform that will take approximately one hour per lesson. Participation in this study is voluntary.\n\nParticipants in this research must be 18 years or older. If you are under the age of 18 then please exit this webpage or navigate to another website such as the Hour of Code at https://hourofcode.com, which is designed for K-12 students.\n\nIf you are not interested in participating please exit the browser or navigate to this website: http://www.umn.edu. Your participation is voluntary and you are free to stop the lesson at any time.\n\nFor the full description please navigate to this website: <a href=\"gateway-1.ipynb\">Gateway Lesson Research Study Permission</a>.\n\n</font>\n\n",
"_____no_output_____"
],
[
"# What is in the World of Cyberinfrastructure?\n\nTo become a user of cyberinfrastructure to solve geospatial problems you must first know what it is all about.\nYou need to develop 'Cyber Literacy,' but what does that mean?\n\n",
"_____no_output_____"
],
[
"### Cyber Literacy for GIScience\n\n> The ability to understand and use established and emerging technologies to transform all forms and magnitudes of geospatial data into information for interdisciplinary problem solving.",
"_____no_output_____"
],
[
"## Literacy and the Three R's\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\nIn the 18th and 19th centuries, general education was framed around gaining literacy in the Three R’s:\n\n\n1. Reading\n2. wRiting\n3. Reckoning (or, aRithmetic) \n\nHere, \"literacy\" meant the ability to decode and comprehend written language at a rudimentary level.\n\n</td>\n<td width=40%> <img src='supplementary/library_icon.png' alt='Person reading a book'></td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"## Literacies\n\n<table>\n <tr style=\"background: #fff\">\n <td align=\"left\" valign=\"top\">\n\nLiteracies outline essential abilities and foundational knowledge. In the 21st century we recognize many new literacies ...\n\n * Financial literacy\n * Health literacy\n * Ecoliteracy\n\nAnd now\n * Cyber literacy\n\n</td>\n<td width=40%> <img src='supplementary/hippo-mag-glass.png' alt='Hippo with magnifying glass'></td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"## Cyber Literacy\n\n<table>\n <tr style=\"background: #fff\">\n <td align=\"left\" valign=\"top\">\n\nBasically ... Cyber Literacy for GIScience helps us make sense of the data-rich world using geospatial technologies and cyberinfrastructure.\n\n</td>\n<td width=30%> <img src='supplementary/world-to-server.png' alt='Data-rich world going into a server'></td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"## Cyber Literacy: Breaking it down\n\n> “**the ability to understand and use \n> established and emerging technologies**\n> to transform all forms and magnitudes\n> of geospatial data into information\n> for interdisciplinary problem solving.”",
"_____no_output_____"
],
[
"You mean like Jupyter Notebooks?! Like these lessons?! \n\nYes! You have learned how to use Jupyter Notebooks and are currently using Cyberinfrastructure. You are using a National Science and Engineering Cloud resource called **'Jetstream'** led by the Indiana University Pervasive Technology Institute (PTI). <a href=\"https://jetstream-cloud.org/\">Click hear to learn more about Jetstream including how you can use Jetstream for free.</a>",
"_____no_output_____"
],
[
"## Cyber Literacy: Breaking it down\n\n> “the ability to understand and use \n> established and emerging technologies\n> **to transform all forms and magnitudes\n> of geospatial data into information**\n> for interdisciplinary problem solving.”\n\n",
"_____no_output_____"
],
[
"You mean like mapping Covid-19 using Python?! Yes! You have learned how to use Python to transform geospatial data into a useful map.\n\n",
"_____no_output_____"
],
[
"## Cyber Literacy: Breaking it down\n\n> “the ability to understand and use \n> established and emerging technologies\n> to transform all forms and magnitudes\n> of geospatial data into information\n> **for interdisciplinary problem solving.**”",
"_____no_output_____"
],
[
"You mean like combining Covid-19 cases (health science) and county geometry using geospatial technologies (geographic information science) and cyberinfrastructure (computational science)? Yes!",
"_____no_output_____"
],
[
"## Can you write? Are you a poet?\n\nCyber Literacy is NOT about being a computer genius or a programming wizard. Most people have learned basic literacy--the ability to read and write--however most people are not poets or experts in modern Nepali literature. Similarly, many people can learn basic cyber literacy while not being a programming wizard or expert in high-performance computing resources.\n\nYou are already on your way to learning cyber literacy. So let's take a closer look at the eight core areas of cyber literacy for GIScience.",
"_____no_output_____"
],
[
"## Eight core areas \n\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\n\n\n\n</td>\n<td width=50%> \n\nHere are the eight core areas of cyber literacy for GIScience.\n\n\n\n</td>\n\n</tr>\n</table>\n\n\n",
"_____no_output_____"
],
[
"## Eight core areas \n\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\n\n\n\n</td>\n<td width=50%> \n\nThe left side represents three key knowledge areas in GIScience: \n1. **Spatial Modeling and Analytics** \n2. **Geospatial Data** \n3. **Spatial Thinking**\n\n</td>\n\n</tr>\n</table>",
"_____no_output_____"
],
[
"## Eight core areas \n\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\n\n\n\n</td>\n<td width=50%> \n\nThe right side represents three key knowledge areas in computational science: \n1. **Parallel Computing** \n2. **Big Data** \n3. **Computational Thinking**\n\n\n\n</td>\n\n</tr>\n</table>\n\n\n",
"_____no_output_____"
],
[
"## Eight core areas \n\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\n\n\n\n</td>\n<td width=50%> \n\nThe top center represents a knowledge area to help these two disciplines integrate technologically: \n1. **Cyberinfrastructure** \n\n</td>\n\n</tr>\n</table>\n\n\n",
"_____no_output_____"
],
[
"## Eight core areas \n\n<table>\n <tr style=\"background: #fff\">\n <td align=left valign=top>\n\n\n\n\n</td>\n<td width=50%> \n\nJust as important; the bottom center represents a knowledge area to help these two disciplines integrate on the people and problem solving side: \n1. **Interdisciplinary Communication** \n\n</td>\n\n</tr>\n</table>\n\n\n",
"_____no_output_____"
],
[
"## Let's check in\n\nDo you have to be an expert in parallel computing to be cyber literate?\n",
"_____no_output_____"
]
],
[
[
"# Multiple choice question using a ToggleButton widget\n# This code cell has tags \"Init\", \"Hide\", and \"5A\"\nimport sys\nsys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook)\nimport hourofci\nwidget1=widgets.ToggleButtons(\n options=['Yes, absolutely!','No'],\n description='',\n disabled=False,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Yes!', 'No way!'],\n)\n\n# Show the options.\ndisplay(widget1)\n\ndef out1():\n print(\"Go to the next slide to see if you were correct.\")\n \nhourofci.SubmitBtn(widget1,out1)",
"_____no_output_____"
]
],
[
[
"## Nope\nNo! You do *not* need to be an expert in parallel programming to be cyber literate. Just like basic literacy, you can have a basic understanding of parallel programming and rely on other experts or tools to make use of the technology to advance your own research.",
"_____no_output_____"
],
[
"## Let's check in\n\nFind all of the core areas of cyber literacy for GIScience. Check all of them. Hold Ctrl to select multiple entries.",
"_____no_output_____"
]
],
[
[
"# Multiple choice question using a ToggleButton widget\n# This code cell has tags \"Init\", \"Hide\", and \"5A\"\n\nwidget2=widgets.SelectMultiple(\n options=['Interdisciplinary communication',\n 'The Internet',\n 'Parallel Computing',\n 'Geospatial Data',\n 'A Shark with a Laser',\n 'Computational Thinking',\n 'Cyberinfrastructure',\n 'Spatial Modeling and Analytics',\n 'Cyber Security',\n 'Spatial Thinking',\n 'Hip Po the Hippo',\n 'Big Data'],\n rows=12,\n description='',\n disabled=False\n)\n\n# Show the options.\ndisplay(widget2)\n\ndef out2():\n print(\"Go to the next slide to see if you were correct.\")\n \nhourofci.SubmitBtn(widget2,out2)",
"_____no_output_____"
]
],
[
[
"## Core knowledge areas and your next step\n\nWhat core knowledge area are you most excited about? ",
"_____no_output_____"
]
],
[
[
"widget3=widgets.RadioButtons(\n options=['Interdisciplinary communication',\n 'Parallel Computing',\n 'Geospatial Data',\n 'Computational Thinking',\n 'Cyberinfrastructure',\n 'Spatial Modeling and Analytics',\n 'Spatial Thinking',\n 'Big Data'],\n description='',\n disabled=False\n)\n\n\n# Show the options.\ndisplay(widget3)\n\ndef out3():\n print(\"Click the next slide to see how you can learn more about it!\")\n \nhourofci.SubmitBtn(widget3,out3)",
"_____no_output_____"
]
],
[
[
"## Did you know ...\n\nThere is an Hour of CI Beginner Lesson for each of the knowledge areas! You can try Beginner Lessons in any order, take one or take them all. It is up to you.\n\nFeel free to check out a Beginner Lesson once they are ready to pilot. Feel free to check back in a few weeks.\n\nFor now, let's move on to the last segment where you get to apply what you learned.\n\n<a href=\"gateway-exploration.ipynb\">Click here to go to the Gateway Exploration segment.</a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.