repo_name | hexsha | file_path | code | apis | possible_versions |
---|---|---|---|---|---|
TakumaE/TorusE | [
"80056fc6f2602219e4b84c16b0c3fb2108d702dd"
] | [
"models.py"
] | [
"import math\nimport tensorflow as tf\n\n\nclass BasicModel(object):\n def __init__(self, config, nent, nrel):\n super(BasicModel, self).__init__()\n self.config = config\n self.pos_h = tf.placeholder(tf.int32, [None])\n self.pos_t = tf.placeholder(tf.int32, [None])\n self.pos_r = tf.placeholder(tf.int32, [None])\n self.neg_h = tf.placeholder(tf.int32, [None])\n self.neg_t = tf.placeholder(tf.int32, [None])\n self.neg_r = tf.placeholder(tf.int32, [None])\n\n self.emb_ent = tf.Variable(tf.random_uniform([nent, config.emb_dim], -0.5, 0.5), name=\"ent_emb\")\n self.emb_rel = tf.Variable(tf.random_uniform([nrel, config.emb_dim], -0.5, 0.5), name=\"rel_emb\")\n\n pos_he = tf.nn.embedding_lookup(self.emb_ent, self.pos_h)\n pos_re = tf.nn.embedding_lookup(self.emb_rel, self.pos_r)\n pos_te = tf.nn.embedding_lookup(self.emb_ent, self.pos_t)\n\n neg_he = tf.nn.embedding_lookup(self.emb_ent, self.neg_h)\n neg_re = tf.nn.embedding_lookup(self.emb_rel, self.neg_r)\n neg_te = tf.nn.embedding_lookup(self.emb_ent, self.neg_t)\n\n pos_score = self.scoring_func(pos_he, pos_re, pos_te)\n neg_score = self.scoring_func(neg_he, neg_re, neg_te)\n\n # Margin loss\n self.loss = tf.reduce_sum(\n tf.maximum(tf.subtract(tf.add(pos_score, self.config.margin), neg_score), 0.))\n\n # Testing\n self.r_score = self.scoring_func(pos_he, pos_re, self.emb_ent)\n self.l_score = self.scoring_func(self.emb_ent, pos_re, pos_te)\n\n def scoring_func(self, h, r, t):\n raise NotImplementedError\n\n\nclass TransE(BasicModel):\n def __init__(self, config, nent, nrel):\n super(TransE, self).__init__(config, nent, nrel)\n\n def scoring_func(self, h, r, t):\n d = tf.subtract(tf.add(h, r), t)\n if \"l1\" in self.config.reg:\n return tf.reduce_sum(tf.abs(d), 1)\n else: # l2\n return tf.reduce_sum(tf.square(d), 1)\n\n\nclass TorusE(BasicModel):\n def __init__(self, config, nent, nrel):\n super(TorusE, self).__init__(config, nent, nrel)\n\n def scoring_func(self, h, r, t):\n d = tf.subtract(tf.add(h, r), t)\n d = d - tf.floor(d)\n d = tf.minimum(d, 1.0 - d)\n if \"el2\" in self.config.reg:\n return tf.reduce_sum(2 - 2 * tf.cos(2 * math.pi * d), 1) / 4\n elif \"l2\" in self.config.reg:\n return 4 * tf.reduce_sum(tf.square(d), 1)\n else: # l1\n return 2 * tf.reduce_sum(tf.abs(d), 1)\n"
] | [
[
"tensorflow.cos",
"tensorflow.minimum",
"tensorflow.floor",
"tensorflow.placeholder",
"tensorflow.add",
"tensorflow.square",
"tensorflow.random_uniform",
"tensorflow.nn.embedding_lookup",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
JonasDHomburg/LAMARCK | [
"0e372c908ff59effc6fd68e6477d04c4d89e6c26"
] | [
"LAMARCK_ML/architectures/variables/variables_test.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom LAMARCK_ML.architectures.variables import Variable\nfrom LAMARCK_ML.architectures.variables.initializer import GlorotUniform, Constant\nfrom LAMARCK_ML.architectures.variables.regularisation import L1, L2\nfrom LAMARCK_ML.data_util.dataType import DDouble, DFloat\n\n\nclass TestVariable(unittest.TestCase):\n\n def test_serialization_str(self):\n for trainable in [True, False]:\n a = Variable(dtype=DDouble, value=np.random.random((3, 7, 4, 9)), trainable=trainable,\n initializer=GlorotUniform(), regularisation=L1())\n b = Variable.__new__(Variable)\n b.__setstate__(a.__getstate__())\n self.assertEqual(a, b)\n self.assertEqual(b, a)\n b.trainable = not b.trainable\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.trainable = a.trainable\n self.assertEqual(b, a)\n b.dtype = DFloat\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.dtype = a.dtype\n self.assertEqual(b, a)\n b.value = np.random.random((3, 7, 4, 9))\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.value = a.value\n self.assertEqual(b, a)\n b.regularisation = L2()\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.regularisation = L1()\n self.assertEqual(b, a)\n b.initializer = Constant()\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.initializer = GlorotUniform()\n self.assertEqual(b, a)\n\n def test_serialization_pb(self):\n for trainable in [True, False]:\n a = Variable(dtype=DDouble, value=np.random.random((3, 7, 4, 9)), trainable=trainable,\n initializer=GlorotUniform(), regularisation=L1())\n b = Variable.__new__(Variable)\n b.__setstate__(a.get_pb())\n self.assertEqual(a, b)\n self.assertEqual(b, a)\n b.trainable = not b.trainable\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.trainable = a.trainable\n self.assertEqual(b, a)\n b.dtype = DFloat\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.dtype = a.dtype\n self.assertEqual(b, a)\n b.value = np.random.random((3, 7, 4, 9))\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.value = a.value\n self.assertEqual(b, a)\n b.regularisation = L2()\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.regularisation = L1()\n self.assertEqual(b, a)\n b.initializer = Constant()\n self.assertNotEqual(a, b)\n self.assertNotEqual(b, a)\n b.initializer = GlorotUniform()\n self.assertEqual(b, a)\n\n pass\n"
] | [
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tkerola/chainer-trt | [
"4e1adc0370e11ad7736a5fafdfd5aeca168c700e"
] | [
"test/test_model_retriever/inputs_test.py"
] | [
"import chainer\nimport numpy as np\n\nfrom helper import ModelRetrieverHelper\n\n\nclass TestInputs(ModelRetrieverHelper):\n\n @ModelRetrieverHelper.case\n def test_input_order(self):\n xs = [chainer.Variable(np.ones((1, 3, 32, 32))) for _ in range(5)]\n y = xs[0]\n for x in xs[1:]:\n y = y + x\n for i in range(len(xs)):\n self.retriever.register_inputs(xs[i], name='in_' + str(i))\n self.retriever(y)\n j = self.retriever.generate_json_source()\n assert j['inputs'] == ['in_0', 'in_1', 'in_2', 'in_3', 'in_4']\n\n @ModelRetrieverHelper.case\n def test_unused_registerd_input_should_not_appear(self):\n xs = [chainer.Variable(np.ones((1, 3, 32, 32))) for _ in range(5)]\n y = xs[0]\n for x in xs[1:-1]: # The last one is intentionally ignored\n y = y + x\n for i in range(len(xs)):\n self.retriever.register_inputs(xs[i], name='in_' + str(i))\n\n self.retriever(y)\n j = self.retriever.generate_json_source()\n assert j['inputs'] == ['in_0', 'in_1', 'in_2', 'in_3']\n\n @ModelRetrieverHelper.case\n def test_constants_should_not_appear_in_inputs(self):\n x = chainer.Variable(np.ones((1, 3, 32, 32)))\n c = chainer.Variable(np.ones((1, 3, 32, 32))) * 3\n y = x + c\n self.retriever.register_inputs(x, name='in')\n self.retriever(y)\n j = self.retriever.generate_json_source()\n assert j['inputs'] == ['in']\n assert sum(l['type'] == 'ConstantInput' for l in j['layers']) == 1\n\n @ModelRetrieverHelper.case\n def test_input_shouldnt_be_treated_as_constant_if_nothing_registered(self):\n x1 = chainer.Variable(np.ones((1, 3, 32, 32)))\n x2 = chainer.Variable(np.ones((1, 3, 32, 32)) * 2)\n y = x1 + x2\n self.retriever(y)\n j = self.retriever.generate_json_source()\n assert len(j['inputs']) == 2\n assert sum(l['type'] == 'input' for l in j['layers']) == 2\n assert sum(l['type'] == 'ConstantInput' for l in j['layers']) == 0\n\n @ModelRetrieverHelper.case\n def test_out_in_middle_of_already_read_graph(self):\n x = chainer.Variable(np.zeros((1, 3, 32, 32)))\n y1 = x + 1\n y2 = y1 * 2\n self.retriever(y2, name='y2')\n self.retriever(y1, name='y1')\n j = self.retriever.generate_json_source()\n assert len(j['outputs']) == 2\n y2_id, y2_name = j['outputs'][0]\n y1_id, y1_name = j['outputs'][1]\n assert y2_id == 'MulConstant-1-1' and y2_name == 'y2'\n assert y1_id == 'AddConstant-0-1' and y1_name == 'y1'\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
keshavchandra/Baroda-Finathon-Challenge | [
"af9bfe6e38165eccfe466f9edeb92480115ae40d"
] | [
"BarodaFinathon/branchevaluation/views.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import (\n\tAllowAny,\n\t)\nfrom sklearn.cluster import DBSCAN, KMeans\nfrom django.http import HttpResponse\nfrom math import pi, sqrt, fabs\nimport numpy as np\nimport googlemaps\nimport requests\nimport json\nimport time\n\n# New Branch Evaluation API\n# Stacks Used : Django v1.11, Django REST Framework, Scikit-Learn, Numpy, Google Maps Geocoding, Reverse Geocoding, Places API, Python v2.7.12.\n# Go to http://localhost:8000/api/branch/ to run the API\n# Make a POST request as {\"city\": \"city_name\"} to fetch the results\n\nclass BranchEvaluationAPI(APIView):\n\tpermission_class = (AllowAny,)\n\n\tdef get(self, request, format=None):\n\t\treturn Response(\"Baroda Finathon Challenge : New Branch Evaluation and Tender Filling API (The Capitalists)\")\n\n\tdef post(self, request, format=None):\n\t\tcity = request.data.get('city')\n\t\tcity = str(city)\n\t\tcity = str.title(city)\n\n\t\tradius = 35000\n\t\tjdata = json.loads(open ('district.json').read())\n\t\tfor i in jdata:\n\t\t\tif i['district'] == city:\n\t\t\t\tarea = i['area']\n\t\t\t\tarea = int(area.replace(',', ''))\n\t\t\t\tradius = int(sqrt(area / pi))*1000\n\n\t\tdata=[]\n\t\ts=0\n\t\tcnt=0\n\t\tnumber=[0]*6\n\t\tgmaps = googlemaps.Client(key='AIzaSyDPLRH4a8cunU3eRt67BNss90ej_VdYSVk')\n\t\tgeocode_result = gmaps.geocode(city)\n\t\tlocation = geocode_result[0]['geometry']['location']\n\t\tlatlng = (location['lat'], location['lng'])\n\n\t\t# Collecting data from various places with the help of Google Maps Places API\n\t\tplaces = ['post_office', 'hospital', 'bank', 'bus_station', 'atm', 'school']\n\t\tfor i in places:\n\t\t\td = {'location': latlng, 'radius': radius, 'type': i}\n\t\t\tplaces_result = gmaps.places_nearby(**d)\t\n\t\t\tfor x in places_result.get(\"results\"):\n\t\t\t\tdata.append((x.get(\"geometry\").get(\"location\").get(\"lat\"),x.get(\"geometry\").get(\"location\").get(\"lng\")))\n\t\t\ttoken = places_result.get(\"next_page_token\")\n\t\t\twhile token:\n\t\t\t\ttime.sleep(2)\n\t\t\t\td['page_token'] = token\n\t\t\t\tplaces_result = gmaps.places_nearby(**d)\n\t\t\t\tfor x in places_result.get(\"results\"):\n\t\t\t\t\tdata.append((x.get(\"geometry\").get(\"location\").get(\"lat\"),x.get(\"geometry\").get(\"location\").get(\"lng\")))\n\t\t\t\ttoken = places_result.get(\"next_page_token\")\n\t\t\tnumber[cnt]=len(data)-s\n\t\t\ts += number[cnt]\n\t\t\tcnt+=1\n\n\t\t# Applying DBSCAN Algorithm over the collected coordinates points to detect the number of clusters formed\n\t\tkms_per_radian = 6371.0088\n\t\tepsilon = 1.15 / kms_per_radian\n\t\tdb = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine').fit(np.radians(data))\n\t\tcore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n\t\tcore_samples_mask[db.core_sample_indices_] = True\n\t\tlabels = db.labels_\n\t\tn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\t\tprint (n_clusters_)\n\n\t\t# Applying KMeans Algorithm over the points to find out centroid points\n\t\tkmeans = KMeans(n_clusters=n_clusters_)\n\t\tkmeans.fit(data)\n\t\tlabels = kmeans.labels_\n\t\tcentroids = kmeans.cluster_centers_.tolist()\n\t\tcentroids = [[round(x,7) for x in y] for y in centroids]\n\t\tprint (len(centroids))\n\t\tprint (centroids)\n\n\t\t# Getting 
Banks location from Bank of Baroda City Wise Search API\n\t\tapi_search = requests.post(\"http://104.211.176.248:8080/bob/bobuat/api/CitywiseSearch\", headers={\"apikey\":\"Kf1L7J1jQ0dFasC\",\"content-type\":\"application/json\"}, json={\"city\":city}).json()\n\t\tapi_data = []\n\t\tfor x in api_search:\n\t\t\tapi_data.append([x.get(\"latitude\"),x.get(\"longitude\")])\n\t\tprint (len(api_data))\n\t\tprint (api_data)\n\n\t\t# Subtracting Nearby banks locations from the clustering data\n\t\ttempList=[]\n\t\tfor x in centroids:\n\t\t\tadd=True\n\t\t\tfor y in api_data:\n\t\t\t\tif fabs(x[0]-y[0])< 0.02 and fabs(x[1]-y[1])<0.02:\n\t\t\t\t\tadd = False\n\t\t\t\t\tbreak\n\t\t\tif add:\n\t\t\t\ttempList.append(x)\n\t\tresult = [x for x in tempList if x not in api_data]\n\t\tprint (len(result))\n\t\tprint (result)\n\n\t\t# Output Code\n\n\t\toutput='''\n\t\t<!DOCTYPE html>\n\t\t<html> \n\t\t<head> \n\t\t <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\"> \n\t\t <title>Branch Evaluation Api – Django REST framework</title> \n\t\t <script src=\"http://maps.google.com/maps/api/js?key=AIzaSyDPLRH4a8cunU3eRt67BNss90ej_VdYSVk&sensor=false\"></script>\n\t\t</head> \n\t\t<style>\n\t #info\n\t\t{\n\t\t position: fixed;\n\t\t color: #fff;\n\t\t background: rgba(0,0,0,0.5);\n\t\t border-radius: 25px;\n\t\t padding: 10px;\n\t\t right:0;\n\t\t top:10px;\n\t\t z-index: 1;\n\t\t}\n\t\ttable, th, td\n\t\t{\n\t\t border :1px solid black;\n\t\t border-collapse : collapse;\n\t text-align: center;\n\t\t padding: 10px;\n\t\t}\n\t\t</style>\n\t\t<body>\n\t\t <div id=\"info\">\n\t\t <p align=\"left\">City: <b>'''+city+'''</b></p>\n\t\t <p align=\"left\">Location: <b>'''+str(location['lat'])+''','''+str(location['lng'])+'''</b></p>\n\t\t <p>1. Number of Cluster points dectected by our analysis: <b>'''+str(len(centroids))+'''</b>. Denoted by marker <img src=\"http://maps.google.com/mapfiles/ms/icons/red-dot.png\" alt=\"red marker\" /></p>\n\t\t <p>2. Number of Bank locations returned by Bank API: <b>'''+str(len(api_data))+'''</b>. Denoted by marker <img src=\"http://maps.google.com/mapfiles/ms/icons/green-dot.png\" alt=\"green marker\" /></p>\n\t\t <p>3. Number of New Bank locations predicted by us: <b>'''+str(len(result))+'''</b>. 
Denoted by marker <img src=\"http://maps.google.com/mapfiles/ms/icons/blue-dot.png\" alt=\"blue marker\" /></p>\n\t\t </div>\n\t\t <div id=\"map1\" style=\"width: 900px; height: 800px;\"></div>\n\t\t <br>\n\t\t <div id=\"map2\" style=\"width: 900px; height: 800px;\"></div>\n\t\t <br>\n\t\t <table>\n\t\t <thead>\n\t\t <th>S No.</th>\n\t\t <th>Latitude</th>\n\t\t <th>Longitude</th>\n\t\t <th>New Banks to be opened nearby these addresses</th>\n\t\t </thead>\n\t\t <tbody>\n\t\t '''\n\t\tcount=0\n\t\tfor latlng in result:\n\t\t\td = {'latlng': latlng}\n\t\t\trg = gmaps.reverse_geocode(**d)\n\t\t\trg = rg[0]\n\t\t\tcount+=1\n\t\t\toutput+='<tr><td>'+str(count)+'</td><td>'+(str(rg.get(\"geometry\").get(\"location\").get(\"lat\"))+'</td><td>'+str(rg.get(\"geometry\").get(\"location\").get(\"lng\"))+'</td><td>'+rg.get(\"formatted_address\"))+'</td></tr>'\n\n\t\toutput+='''\n\t\t </tbody>\n\t\t </table>\n\t\t <script>\n\t\t // Define your locations: HTML content for the info window, latitude, longitude\n\t\t var locations1 = '''+json.dumps(centroids)+''';\n\t\t var locations2 = '''+json.dumps(api_data)+''';\n\t\t\tvar locations3 = '''+json.dumps(result)+''';\n\t\t // Setup the different icons and shadows\n\t\t var iconURLPrefix = 'http://maps.google.com/mapfiles/ms/icons/';\n\n\t\t var icons = [\n\t\t iconURLPrefix + 'red-dot.png',\n\t\t iconURLPrefix + 'green-dot.png',\n\t\t iconURLPrefix + 'blue-dot.png',\n\t\t ]\n\t\t var iconsLength = icons.length;\n\n\t\t var map1 = new google.maps.Map(document.getElementById('map1'), {\n\t\t zoom: 11,\n\t\t center: new google.maps.LatLng('''+str(location['lat'])+''','''+str(location['lng'])+'''),\n\t\t mapTypeId: google.maps.MapTypeId.ROADMAP,\n\t\t mapTypeControl: false,\n\t\t streetViewControl: false,\n\t\t panControl: false,\n\t\t zoomControlOptions: {\n\t\t position: google.maps.ControlPosition.LEFT_BOTTOM\n\t\t }\n\t\t });\n\n\t\t var map2 = new google.maps.Map(document.getElementById('map2'), {\n\t\t zoom: 11,\n\t\t center: new google.maps.LatLng('''+str(location['lat'])+''','''+str(location['lng'])+'''),\n\t\t mapTypeId: google.maps.MapTypeId.ROADMAP,\n\t\t mapTypeControl: false,\n\t\t streetViewControl: false,\n\t\t panControl: false,\n\t\t zoomControlOptions: {\n\t\t position: google.maps.ControlPosition.LEFT_BOTTOM\n\t\t }\n\t\t });\n\n\t\t var markers = new Array();\n\n\t\t var iconCounter = 0;\n\n\t\t // Add the markers to the map\n\t\t for (var i = 0; i < locations1.length; i++) { \n\t\t var marker = new google.maps.Marker({\n\t\t position: new google.maps.LatLng(locations1[i][0], locations1[i][1]),\n\t\t map: map1,\n\t\t icon: icons[iconCounter]\n\t\t });\n\n\t\t markers.push(marker);\n\t\t }\n\n\t\t iconCounter++;\n\n\t\t for (var i = 0; i < locations2.length; i++) { \n\t\t var marker = new google.maps.Marker({\n\t\t position: new google.maps.LatLng(locations2[i][0], locations2[i][1]),\n\t\t map: map2,\n\t\t icon: icons[iconCounter]\n\t\t });\n\n\t\t markers.push(marker);\n\t\t }\n\n\t\t iconCounter++;\n\n\t\t for (var i = 0; i < locations3.length; i++) { \n\t\t var marker = new google.maps.Marker({\n\t\t position: new google.maps.LatLng(locations3[i][0], locations3[i][1]),\n\t\t map: map2,\n\t\t icon: icons[iconCounter]\n\t\t });\n\n\t\t markers.push(marker);\n\t\t }\n\n\t\t function autoCenter() {\n\t\t // Create a new viewpoint bound\n\t\t var bounds = new google.maps.LatLngBounds();\n\t\t // Go through each...\n\t\t for (var i = 0; i < markers.length; i++) { \n\t\t bounds.extend(markers[i].position);\n\t\t }\n\t\t // Fit these bounds to the 
map\n\t\t map1.fitBounds(bounds);\n\t\t map2.fitBounds(bounds);\n\t\t }\n\t\t autoCenter();\n\t\t </script> \n\t\t</body>\n\t\t</html>\n\t\t'''\n\t\treturn HttpResponse(output)"
] | [
[
"numpy.radians",
"numpy.zeros_like",
"sklearn.cluster.KMeans",
"sklearn.cluster.DBSCAN"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
luizhsalazar/no-show-ml | [
"bbe3a2ecc93b14197147ce399008beef28be7a14"
] | [
"api/app/main.py"
] | [
"from typing import Optional\n\nimport pandas as pd\nfrom fastapi import FastAPI\n\nfrom app.model import Model\n\napp = FastAPI()\nmodel = Model(\"xgboost\", \"Production\")\n\[email protected](\"/\")\ndef read_root():\n data = pd.read_csv('casas_X.csv')\n\n return { \"model_name\" : model.make_predictions(data) }\n\[email protected](\"/items/{item_id}\")\ndef read_item(item_id: int, q: Optional[str] = None):\n return {\"item_id\": item_id, \"q\": q}\n\n# @app.post(\"/predict\")\n# async def create_upload_file(file: UploadFile = File(...)):\n# if file.filename.endswith(\".csv\"):\n# with open(file.filename, \"wb\")as f:\n# f.write(file.file.read())\n# data = pd.read_csv(file.filename)\n# os.remove(file.filename)\n\n# return {\n# \"Labels\": model.predict(data)\n# }\n# else:\n# raise HTTPException(status_code=400, detail=\"Invalid file format. Only CSV Files accepted.\")\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
greenfrogs/FHIR-Parser | [
"1408da63e93eff753d4cba82f4fef9e2c177c8c9"
] | [
"examples/graph_marital_status.py"
] | [
"import matplotlib.pyplot as plt\nfrom fhir_parser import FHIR\n\nfhir = FHIR()\npatients = fhir.get_all_patients()\n\nmarital_status = {}\nfor patient in patients:\n if str(patient.marital_status) in marital_status:\n marital_status[str(patient.marital_status)] += 1\n else:\n marital_status[str(patient.marital_status)] = 1\n\n\nplt.bar(range(len(marital_status)), list(marital_status.values()), align='center')\nplt.xticks(range(len(marital_status)), list(marital_status.keys()))\nplt.show()"
] | [
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suomitekai/fairing | [
"9ca6a1138529b3f0b21979d62c7cb1f303bc52e0"
] | [
"examples/distributed-training/main.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple MNIST classifier which displays summaries in TensorBoard.\n\nThis is an unimpressive MNIST model, but it is a good example of using\ntf.name_scope to make a graph legible in the TensorBoard graph explorer, and of\nnaming summary tags so that they are grouped meaningfully in TensorBoard.\n\nIt demonstrates the functionality of every TensorBoard dashboard.\n\"\"\"\n\nimport os\nimport json\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\nMAX_STEPS = 1000\nLEARNING_RATE = 0.001\nDROPOUT = 0.9\nDATA_DIR = os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),\n 'tensorflow/input_data')\nLOG_DIR = os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 'tensorflow/logs')\n\n\nclass TensorflowModel(object):\n def build(self): # pylint:disable=too-many-statements\n tf_config_json = os.environ.get(\"TF_CONFIG\", \"{}\")\n tf_config = json.loads(tf_config_json)\n\n task = tf_config.get(\"task\", {})\n cluster_spec = tf_config.get(\"cluster\", {})\n cluster_spec_object = tf.train.ClusterSpec(cluster_spec)\n job_name = task[\"type\"]\n task_id = task[\"index\"]\n server_def = tf.train.ServerDef(\n cluster=cluster_spec_object.as_cluster_def(),\n protocol=\"grpc\",\n job_name=job_name,\n task_index=task_id)\n self.server = tf.train.Server(server_def)\n self.is_chief = (job_name == 'chief' or job_name == 'master')\n\n # Import data\n self.mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)\n\n # Between-graph replication\n with tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % task_id,\n cluster=cluster_spec)):\n\n # count the number of updates\n self.global_step = tf.get_variable(\n 'global_step',\n [],\n initializer=tf.constant_initializer(0),\n trainable=False)\n\n # Input placeholders\n with tf.name_scope('input'):\n self.x = tf.placeholder(\n tf.float32, [None, 784], name='x-input')\n self.y_ = tf.placeholder(\n tf.float32, [None, 10], name='y-input')\n\n with tf.name_scope('input_reshape'):\n image_shaped_input = tf.reshape(self.x, [-1, 28, 28, 1])\n tf.summary.image('input', image_shaped_input, 10)\n\n # We can't initialize these variables to 0 - the network will get stuck.\n def weight_variable(shape):\n \"\"\"Create a weight variable with appropriate initialization.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n \"\"\"Create a bias variable with appropriate initialization.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n 
tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n \"\"\"Reusable code for making a simple neural net layer.\n\n It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.\n It also sets up name scoping so that the resultant graph is easy to read,\n and adds a number of summary ops.\n \"\"\"\n # Adding a name scope ensures logical grouping of the layers in the graph.\n with tf.name_scope(layer_name):\n # This Variable will hold the state of the weights for the layer\n with tf.name_scope('weights'):\n weights = weight_variable([input_dim, output_dim])\n variable_summaries(weights)\n with tf.name_scope('biases'):\n biases = bias_variable([output_dim])\n variable_summaries(biases)\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n tf.summary.histogram('pre_activations', preactivate)\n activations = act(preactivate, name='activation')\n tf.summary.histogram('activations', activations)\n return activations\n\n hidden1 = nn_layer(self.x, 784, 500, 'layer1')\n\n with tf.name_scope('dropout'):\n self.keep_prob = tf.placeholder(tf.float32)\n tf.summary.scalar('dropout_keep_probability', self.keep_prob)\n dropped = tf.nn.dropout(hidden1, self.keep_prob)\n\n # Do not apply softmax activation yet, see below.\n y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)\n\n with tf.name_scope('cross_entropy'):\n # The raw formulation of cross-entropy,\n #\n # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),\n # reduction_indices=[1]))\n #\n # can be numerically unstable.\n #\n # So here we use tf.nn.softmax_cross_entropy_with_logits on the\n # raw outputs of the nn_layer above, and then average across\n # the batch.\n diff = tf.nn.softmax_cross_entropy_with_logits(\n labels=self.y_, logits=y)\n with tf.name_scope('total'):\n cross_entropy = tf.reduce_mean(diff)\n tf.summary.scalar('cross_entropy', cross_entropy)\n\n with tf.name_scope('train'):\n self.train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(\n cross_entropy)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(\n tf.argmax(y, 1), tf.argmax(self.y_, 1))\n with tf.name_scope('accuracy'):\n self.accuracy = tf.reduce_mean(\n tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', self.accuracy)\n\n # Merge all the summaries and write them out to\n # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)\n self.merged = tf.summary.merge_all()\n\n self.init_op = tf.global_variables_initializer()\n\n def train(self):\n self.build()\n\n def feed_dict(train):\n \"\"\"Make a TensorFlow feed_dict: maps data onto Tensor placeholders.\"\"\"\n if train:\n xs, ys = self.mnist.train.next_batch(100, fake_data=False)\n k = DROPOUT\n else:\n xs, ys = self.mnist.test.images, self.mnist.test.labels\n k = 1.0\n return {self.x: xs, self.y_: ys, self.keep_prob: k}\n\n sv = tf.train.Supervisor(is_chief=self.is_chief,\n global_step=self.global_step,\n init_op=self.init_op,\n logdir=LOG_DIR)\n\n with sv.prepare_or_wait_for_session(self.server.target) as sess:\n train_writer = tf.summary.FileWriter(\n LOG_DIR + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(LOG_DIR + '/test')\n # Train the model, and also write summaries.\n # Every 10th step, measure test-set accuracy, and write test summaries\n # All 
other steps, run train_step on training data, & add training summaries\n\n for i in range(MAX_STEPS):\n if i % 10 == 0: # Record summaries and test-set accuracy\n summary, acc = sess.run(\n [self.merged, self.accuracy], feed_dict=feed_dict(False))\n test_writer.add_summary(summary, i)\n print('Accuracy at step %s: %s' % (i, acc))\n else: # Record train set summaries, and train\n if i % 100 == 99: # Record execution stats\n run_options = tf.RunOptions(\n trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, _ = sess.run([self.merged, self.train_step],\n feed_dict=feed_dict(True),\n options=run_options,\n run_metadata=run_metadata)\n train_writer.add_run_metadata(\n run_metadata, 'step%03d' % i)\n train_writer.add_summary(summary, i)\n print('Adding run metadata for', i)\n else: # Record a summary\n summary, _ = sess.run(\n [self.merged, self.train_step], feed_dict=feed_dict(True))\n train_writer.add_summary(summary, i)\n train_writer.close()\n test_writer.close()\n\n\nif __name__ == '__main__':\n if os.getenv('FAIRING_RUNTIME', None) is None:\n from kubeflow import fairing\n fairing.config.set_preprocessor('python', input_files=[__file__])\n fairing.config.set_builder(\n name='docker', registry='gcr.io/mrick-gcp', base_image='tensorflow/tensorflow')\n fairing.config.set_deployer(\n name='tfjob', namespace='default', worker_count=1, ps_count=1)\n fairing.config.run()\n else:\n remote_train = TensorflowModel()\n remote_train.train()\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.RunMetadata",
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.summary.image",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.train.Server",
"tensorflow.truncated_normal",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.summary.histogram",
"tensorflow.reduce_max",
"tensorflow.summary.FileWriter",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.train.ClusterSpec",
"tensorflow.train.replica_device_setter",
"tensorflow.constant_initializer",
"tensorflow.reduce_min",
"tensorflow.train.Supervisor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Jumperkables/tvqa_modality_bias | [
"d753ad679e6a53f2be38e1b882164e33e991b0e6"
] | [
"tools/violin_plot_old.py"
] | [
"import sys, os\n#sys.path.insert(1, os.path.expanduser(\"~/kable_management/mk8+-tvqa\"))\n#sys.path.insert(1, os.path.expanduser(\"~/kable_management/projects/tvqa_modality_bias\"))\nsys.path.insert(1, \"..\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport argparse\nimport os\nimport random\n#from config import BaseOptions\nimport utils\nimport numpy as np\n\n\nclass BaseOptions(object):\n def __init__(self):\n self.parser = argparse.ArgumentParser()\n self.initialized = False\n self.opt = None\n \n def initialize(self):\n self.parser.add_argument(\"--lanecheck_path\", type=str, \n default=os.path.expanduser(\"~/kable_management/mk8+-tvqa/dataset_paper/jerry/results/tvqa_abc_svir_bert/lanecheck_dict.pickle_valid\"), \n help=\"Validation lane check path\")\n\n def parse(self):\n \"\"\"parse cmd line arguments and do some preprocessing\"\"\"\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n self.opt = opt\n return opt\n\ndef confusion_matrix_tn_fn(a_idx, ground_truth, prediciton):\n # if(a_idx == ground_truth) and (a_idx == prediciton):\n # return('True Positive')\n # if(a_idx != ground_truth) and (a_idx == prediciton):\n # return('False Positive')\n if(a_idx == ground_truth) and (a_idx != prediciton):\n return('False Negative')\n if(a_idx != ground_truth) and (a_idx != prediciton):\n return('True Negative')\n return('Ignore')\n\ndef confusion_matrix_tp_fp(a_idx, ground_truth, prediciton):\n if(a_idx == ground_truth) and (a_idx == prediciton):\n return('True Positive')\n if(a_idx != ground_truth) and (a_idx == prediciton):\n return('False Positive')\n # if(a_idx == ground_truth) and (a_idx != prediciton):\n # return('False Negative')\n # if(a_idx != ground_truth) and (a_idx != prediciton):\n # return('True Negative')\n return('Ignore') \n\n\n\n\ndef one_plot(opt):\n sns.set(style=\"whitegrid\", palette=\"pastel\", color_codes=True)\n # Font settings for plot\n import matplotlib\n # matplotlib.rc('font', family='sans-serif') \n # matplotlib.rc('font', serif='Helvetica Neue') \n # matplotlib.rc('text', usetex='false') \n # matplotlib.rcParams['font.family'] = 'cursive'\n\n # Load dictionary\n lanecheck_dict = utils.load_pickle(opt.lanecheck_path)\n\n # Lanecheck out\n sub_out = []\n vcpt_out = []\n vid_out = []\n reg_out = []\n regtopk_out = []\n\n # Check what out features are needed\n sub_flag = True\n vcpt_flag = True\n vid_flag = True\n reg_flag = True\n regtopk_flag= True\n check = random.choice(list(lanecheck_dict.values()))\n if check.get('sub_out') is None:\n sub_flag = False\n if check.get('vcpt_out') is None:\n vcpt_flag = False\n if check.get('vid_out') is None:\n vid_flag = False\n if check.get('reg_out') is None:\n reg_flag = False\n if check.get('regtopk_out') is None:\n regtopk_flag = False \n\n # Iterate through the lanecheck items\n del lanecheck_dict['acc']\n for qid, q_dict in lanecheck_dict.items():\n if sub_flag:\n sub_out.append( q_dict['sub_out'] )\n if vcpt_flag:\n vcpt_out.append( q_dict['vcpt_out'] )\n if vid_flag: \n vid_out.append( q_dict['vid_out'] )\n if reg_flag: \n reg_out.append( q_dict['reg_out'] )\n if regtopk_flag:\n regtopk_out.append( q_dict['regtopk_out'] )\n if sub_flag:\n sub_out = np.stack(sub_out)\n if vcpt_flag:\n vcpt_out = np.stack(vcpt_out)\n if vid_flag: \n vid_out = np.stack(vid_out)\n if reg_flag:\n reg_out = np.stack(reg_out)\n if regtopk_flag:\n regtopk_out = np.stack(regtopk_out)\n\n import pandas as pd\n\n # Plot settings\n pal_tp_fp = {\"True 
Positive\":sns.light_palette(\"green\")[1], \"False Positive\":sns.light_palette(\"red\")[1]}\n pal_tn_fn = {\"True Negative\":sns.light_palette(\"red\")[1], \"False Negative\":sns.light_palette(\"orange\")[1]}\n plot_no = 1\n\n sns.set(font_scale=3.0)\n sns.set_style(\"whitegrid\")\n fig, ax = plt.subplots()\n x_labels = []\n if sub_flag:\n sub_out = [ ('Subtitles', value, aa[5], aa[6], confusion_matrix_tn_fn(a_idx, aa[5], aa[6])) for aa in sub_out for a_idx, value in enumerate(aa[:5]) ]\n sub_out = [ element for element in sub_out if element[4] != 'Ignore' ]\n x_labels.append('Subtitles')\n if vcpt_flag:\n vcpt_out = [ ('Visual Concepts', value, aa[5], aa[6], confusion_matrix_tn_fn(a_idx, aa[5], aa[6])) for aa in vcpt_out for a_idx, value in enumerate(aa[:5]) ]\n vcpt_out = [ element for element in vcpt_out if element[4] != 'Ignore' ]\n x_labels.append('Visual Concepts')\n if vid_flag:\n vid_out = [ ('ImageNet', value, aa[5], aa[6], confusion_matrix_tn_fn(a_idx, aa[5], aa[6])) for aa in vid_out for a_idx, value in enumerate(aa[:5]) ]\n vid_out = [ element for element in vid_out if element[4] != 'Ignore' ]\n x_labels.append('ImageNet')\n if regtopk_flag:\n regtopk_out = [ ('Regional Features', value, aa[5], aa[6], confusion_matrix_tn_fn(a_idx, aa[5], aa[6])) for aa in regtopk_out for a_idx, value in enumerate(aa[:5]) ]\n regtopk_out = [ element for element in regtopk_out if element[4] != 'Ignore' ]\n x_labels.append('Regional Features')\n x_labels.append('Nothing inparticular')\n #plt.xticks([])\n data = []\n data += [('', 38, 1, 1, \"True Negative\")]\n data += [('1', -7, 1, 1, \"True Negative\")]\n data += sub_out\n data += vcpt_out\n data += vid_out\n data += regtopk_out\n\n maxx = 0\n minn = 0\n for dtuple in data:\n if maxx < dtuple[1]:\n maxx = dtuple[1]\n if minn > dtuple[1]:\n minn = dtuple[1]\n print(maxx)\n print(minn)\n\n # data += [('', 38.594997, 1, 1, \"False Positive\")]\n #data += [('1', -5.7718792, 1, 1, \"False Positive\")]\n data = pd.DataFrame(data, columns=['', 'Vote Contribution', 'ground_truth', 'prediction', 'Answer Type'])\n sns.violinplot(data=data, palette=pal_tn_fn, inner=\"quart\", linewidth=2.5, hue='Answer Type', x='', y='Vote Contribution', split=True, legend=False, legend_out=True)\n plt.title('SVIR Trained Model')\n plt.show()\n \n\n\nif __name__ == \"__main__\":\n\n opt = BaseOptions().parse()\n one_plot(opt)\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"numpy.stack",
"pandas.DataFrame",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nokia/zigzag | [
"67c00b9ccd5df7ccecf6e8fc0fc8d1214056b8b3"
] | [
"classes/spatial_loop.py"
] | [
"\"\"\"\nSpatial loop information extraction and integration.\n\nAlways note that the first level in spatial loops is MAC level (pure logic), not a memory level.\nThus the spatial loops always have one more level than temporal loops.\n\"\"\"\n\nimport numpy as np\n\n\nclass SpatialLoop(object):\n\n def __init__(self, spatial_loop, layer_loop_info):\n self.spatial_loop = spatial_loop\n\n Bu = {}\n Ku = {}\n Cu = {}\n OYu = {}\n OXu = {}\n FYu = {}\n FXu = {}\n\n unroll_size = {}\n unit_count = {}\n unit_unique = {}\n unit_duplicate = {}\n loop_levels = {}\n\n unit_serve_scope = {}\n data_serve_scope = {}\n real_bw_boost = {}\n real_bw_boost_high = {}\n real_bw_boost_low = {}\n\n for operand in ['W', 'I', 'O']:\n\n ''' \n Initialize the list size to the number of memory levels.\n\n Bu/Ku/.../FXu: the spatial loop unrolling size for each index \n at different mem level in W/I/O mem system.\n '''\n\n Bu[operand] = [1] * spatial_loop[operand].__len__()\n Ku[operand] = [1] * spatial_loop[operand].__len__()\n Cu[operand] = [1] * spatial_loop[operand].__len__()\n OYu[operand] = [1] * spatial_loop[operand].__len__()\n OXu[operand] = [1] * spatial_loop[operand].__len__()\n FYu[operand] = [1] * spatial_loop[operand].__len__()\n FXu[operand] = [1] * spatial_loop[operand].__len__()\n\n ''' \n unroll_size: the individual spatial unrolling at each level.\n\n unit_count: number of MAC/mem unit at certain level in W/I/O mem system, \n i.e. total spatial unrolling at each level.\n\n unit_unique: number of unique MAC/mem unit at certain level in W/I/O mem system,\n i.e. MACs operate on different values / mem units that hold different values.\n\n unit_duplicate: number of duplicated MAC/mem unit at certain level in W/I/O mem system,\n i.e. MACs operate on same values / mem units that hold same values.\n\n loop_levels: number of mem levels in W/I/O mem system + !!! the innermost MAC logic level !!! .\n '''\n\n unroll_size[operand] = [1] * spatial_loop[operand].__len__()\n unit_count[operand] = [1] * spatial_loop[operand].__len__()\n unit_unique[operand] = [1] * spatial_loop[operand].__len__()\n unit_duplicate[operand] = [1] * spatial_loop[operand].__len__()\n loop_levels[operand] = spatial_loop[operand].__len__()\n\n '''\n unit_serve_scope: one mem at current level serves how many unit at one level below.\n \n data_serve_scope: one data element at current level serves how many unit at one level below.\n (!! 
this parameter can be used as spatially data-sharing hint !!)\n \n real_bw_boost: one mem at current level serves how many unit at one level below with different data.\n '''\n\n unit_serve_scope[operand] = [1] * (spatial_loop[operand].__len__() - 1)\n data_serve_scope[operand] = [1] * (spatial_loop[operand].__len__() - 1)\n real_bw_boost[operand] = [1] * (spatial_loop[operand].__len__() - 1)\n real_bw_boost_high[operand] = [1] * (spatial_loop[operand].__len__() - 1)\n real_bw_boost_low[operand] = [1] * (spatial_loop[operand].__len__() - 1)\n\n for level, current_level_loops in enumerate(spatial_loop[operand]):\n for loop in current_level_loops:\n if loop[0] == 7:\n Bu[operand][level] *= loop[1]\n elif loop[0] == 6:\n Ku[operand][level] *= loop[1]\n elif loop[0] == 5:\n Cu[operand][level] *= loop[1]\n elif loop[0] == 4:\n OYu[operand][level] *= loop[1]\n elif loop[0] == 3:\n OXu[operand][level] *= loop[1]\n elif loop[0] == 2:\n FYu[operand][level] *= loop[1]\n elif loop[0] == 1:\n FXu[operand][level] *= loop[1]\n else:\n raise IndexError('The loop index can only be values from \"1\" to \"7\".')\n\n unroll_size[operand][level] *= loop[1]\n\n ''' \n unit_count is calculated by multiplying all the spatial loop unrolling index values (unroll_size) for W/I/O \n from current level to top level.\n\n Using ().item() to change datatype from numpy int64 to python default int.\n '''\n\n for operand in ['W', 'I', 'O']:\n for level in range(loop_levels[operand]):\n unit_count[operand][level] = (np.prod(unroll_size[operand][level:loop_levels[operand]])).item()\n\n ''' \n unit_unique is calculated by multiplying all the relevant spatial loop unrolling index values for W/I/O \n from current level to top level.\n\n buf_replicate = unit_count/unit_unique\n \n Using // (Floor Division) here to get an integer result out from division.\n '''\n\n for level in range(loop_levels['W']):\n unit_unique['W'][level] = (np.prod(Ku['W'][level:loop_levels['W']] +\n Cu['W'][level:loop_levels['W']] +\n FXu['W'][level:loop_levels['W']] +\n FYu['W'][level:loop_levels['W']])).item()\n\n unit_duplicate['W'][level] = unit_count['W'][level] / unit_unique['W'][level]\n\n for level in range(loop_levels['I']):\n # only when both FY and OY are spatially unrolled, IYu should be calculated by the 'advanced' equation:\n # IY = SY * (OY - 1) + SFY * (FY - 1) + 1\n\n # when only one of them is spatially unrolled, IYu should be calculated by the 'basic' equation:\n # IY = OY + FY - 1\n\n # For example: when stride on IY dimension = 2:\n # OYu 4 & FYu 1 -> IY = OY + FY - 1 = 4 + 1 - 1 = 4\n # we need IX 1, 3, 5, 7. (4 elements in total)\n\n # OYu 4 & FYu 3 -> IY = SY * (OY - 1) + SFY * (FY - 1) + 1 = 2*(4-1)+1*(3-1)+1 = 9\n # we need IX 1, 2, 3, 4, 5, 6, 7, 8, 9. 
(9 elements in total)\n\n if OYu['I'][level] == 1 or FYu['I'][level] == 1:\n IYu = (np.prod(OYu['I'][level:loop_levels['I']]) +\n np.prod(FYu['I'][level:loop_levels['I']]) - 1).item()\n else:\n IYu = (layer_loop_info['SY'] * (np.prod(OYu['I'][level:loop_levels['I']]) - 1) +\n layer_loop_info['SFY'] * (np.prod(FYu['I'][level:loop_levels['I']]) - 1) + 1).item()\n\n if OXu['I'][level] == 1 or FXu['I'][level] == 1:\n IXu = (np.prod(OXu['I'][level:loop_levels['I']]) +\n np.prod(FXu['I'][level:loop_levels['I']]) - 1).item()\n else:\n IXu = (layer_loop_info['SX'] * (np.prod(OXu['I'][level:loop_levels['I']]) - 1) +\n layer_loop_info['SFX'] * (np.prod(FXu['I'][level:loop_levels['I']]) - 1) + 1).item()\n unit_unique['I'][level] = (np.prod(Bu['I'][level:loop_levels['I']] +\n Cu['I'][level:loop_levels['I']] +\n [IYu] + [IXu])).item()\n\n unit_duplicate['I'][level] = unit_count['I'][level] / unit_unique['I'][level]\n\n for level in range(loop_levels['O']):\n unit_unique['O'][level] = (np.prod(Bu['O'][level:loop_levels['O']] +\n Ku['O'][level:loop_levels['O']] +\n OXu['O'][level:loop_levels['O']] +\n OYu['O'][level:loop_levels['O']])).item()\n\n unit_duplicate['O'][level] = unit_count['O'][level] / unit_unique['O'][level]\n\n ''' \n unit_serve_scope is calculated by dividing unit_count at current level by unit_count at one level above.\n \n data_serve_scope is calculated by dividing unit_duplicate at current level by unit_count at one level above.\n \n real_bw_boost can calculated by either dividing unit_unique at current level by unit_count at one level above,\n or by dividing unit_serve_scope by data_serve_scope element-wise.\n\n Note that the number of level here equals to total memory level. \n MAC level is excluded naturally here.\n \n e.g. real_bw_boost = [ 1, 2, 3, 4], \n real_bw_boost_high = [ 1, 2, 6, 24],\n real_bw_boost_low = [24, 24, 12, 4]\n '''\n\n for operand in ['W', 'I', 'O']:\n for level in range(int(loop_levels[operand] - 1)):\n unit_serve_scope[operand][level] = unit_count[operand][level] / unit_count[operand][level + 1]\n data_serve_scope[operand][level] = unit_duplicate[operand][level] / unit_duplicate[operand][level + 1]\n real_bw_boost[operand][level] = unit_unique[operand][level] / unit_unique[operand][level + 1]\n\n for operand in spatial_loop.keys():\n for level in range(int(loop_levels[operand] - 1)):\n real_bw_boost_high[operand][level] = (np.prod(real_bw_boost[operand][0:level + 1]))\n real_bw_boost_low[operand][level] = (np.prod(real_bw_boost[operand][level:loop_levels[operand]]))\n\n '''\n Simply extract spatial unrolling loops in spatial_loop_list.\n '''\n spatial_loop_list = []\n for loop_list in spatial_loop['W']:\n if not loop_list:\n continue\n else:\n for this_loop in loop_list:\n spatial_loop_list.append(this_loop)\n\n '''\n Added for LOMA\n '''\n # Relevant loop type numbers for each operand\n relevant_loop_type_numbers = {'W': [1,2,5,6], 'I': [5,7], 'O': [3,4,6,7]}\n irrelevant_loop_type_numbers = {'W': [3,4,7], 'I': [], 'O': [1,2,5]}\n \n ## Extract the relevant/irrelevant loop unrolling for each operand\n su_relevant_size_dict = {'W': [], 'I': [], 'O': []}\n su_irrelevant_size_dict = {'W': [], 'I': [], 'O': []}\n # WEIGHT and OUTPUT and INPUT relevant\n for operand in ['W', 'O', 'I']:\n for level in range(0, len(spatial_loop[operand])): # start at 0 = include MAC level\n su_relevant_size = 1\n su_irrelevant_size = 1\n for [loop_type_number, su_factor] in spatial_loop[operand][level]:\n if loop_type_number in relevant_loop_type_numbers[operand]:\n 
su_relevant_size *= su_factor\n elif loop_type_number in irrelevant_loop_type_numbers[operand]:\n su_irrelevant_size *= su_factor\n su_relevant_size_dict[operand].append(su_relevant_size)\n su_irrelevant_size_dict[operand].append(su_irrelevant_size)\n # INPUT partially relevant\n su_pr_size_dict_input = {1: [], 2: [], 3: [], 4: []} # 1 = FX, 2 = FY, 3 = OX, 4 = OY\n pr_loops = [1,2,3,4] # 1 = FX, 2 = FY, 3 = OX, 4 = OY\n for level in range(0, len(spatial_loop['I'])):\n su_pr_size = {1: 1, 2: 1, 3: 1, 4: 1}\n for [loop_type_number, su_factor] in spatial_loop[operand][level]:\n if loop_type_number in pr_loops:\n su_pr_size[loop_type_number] *= su_factor\n for key in pr_loops:\n su_pr_size_dict_input[key].append(su_pr_size[key])\n\n self.Bu = Bu\n self.Ku = Ku\n self.Cu = Cu\n self.OYu = OYu\n self.OXu = OXu\n self.FYu = FYu\n self.FXu = FXu\n\n self.unroll_size = unroll_size\n self.unit_count = unit_count\n self.unit_unique = unit_unique\n self.unit_duplicate = unit_duplicate\n self.loop_levels = loop_levels\n\n self.unit_serve_scope = unit_serve_scope\n self.data_serve_scope = data_serve_scope\n self.real_bw_boost = real_bw_boost\n self.real_bw_boost_high = real_bw_boost_high\n self.real_bw_boost_low = real_bw_boost_low\n\n self.spatial_loop_list = spatial_loop_list\n\n self.su_relevant_size_dict = su_relevant_size_dict\n self.su_irrelevant_size_dict = su_irrelevant_size_dict\n self.su_pr_size_dict_input = su_pr_size_dict_input\n\n\n @classmethod\n def extract_loop_info(cls, spatial_loop, layer_loop_info):\n return cls(spatial_loop, layer_loop_info)\n"
] | [
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
linesd/tabular-methods | [
"05ee6488feffc64d3bb7335f26b2e9688d90a57b"
] | [
"examples/example_qlearning.py"
] | [
"import sys\nsys.path.append(\"..\")\nimport numpy as np\nfrom env.grid_world import GridWorld\nfrom algorithms.temporal_difference import qlearning\nfrom utils.plots import plot_gridworld\nnp.random.seed(1)\n\n###########################################################\n# Run Q-Learning on cliff walk #\n###########################################################\n\n# specify world parameters\nnum_rows = 4\nnum_cols = 12\nrestart_states = np.array([[3,1],[3,2],[3,3],[3,4],[3,5],\n [3,6],[3,7],[3,8],[3,9],[3,10]])\nstart_state = np.array([[3,0]])\ngoal_states = np.array([[3,11]])\n\n# create model\ngw = GridWorld(num_rows=num_rows,\n num_cols=num_cols,\n start_state=start_state,\n goal_states=goal_states)\ngw.add_obstructions(restart_states=restart_states)\ngw.add_rewards(step_reward=-1,\n goal_reward=10,\n restart_state_reward=-100)\ngw.add_transition_probability(p_good_transition=1,\n bias=0)\ngw.add_discount(discount=0.9)\nmodel = gw.create_gridworld()\n\n# solve with Q-Learning\nq_function, pi, state_counts = qlearning(model, alpha=0.9, epsilon=0.2, maxiter=100, maxeps=10000)\n\n# plot the results\npath = \"../doc/imgs/qlearning_cliffworld.png\"\nplot_gridworld(model, policy=pi, state_counts=state_counts, title=\"Q-Learning\", path=path)"
] | [
[
"numpy.array",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kidpaul94/masknet | [
"f70324cfecd9c91dce532284efec02636c04af65"
] | [
"evaluation/evaluate_stats.py"
] | [
"import open3d as o3d\nimport argparse\nimport os\nimport sys\nimport logging\nimport numpy\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport transforms3d.euler as t3d\nimport transforms3d\nimport time\n\n# Only if the files are in example folder.\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(BASE_DIR, os.pardir))\n\t\nfrom learning3d.models import MaskNet, PointNet, iPCRNet, RPMNet, PointNetLK, DGCNN, DCP, PRNet\nfrom learning3d.losses import FrobeniusNormLoss, RMSEFeaturesLoss\nfrom learning3d.losses import ChamferDistanceLoss\nfrom TestDataLoader import TestDataLoader\n\ndef pc2points(data):\n\tif len(data.shape) == 3:\n\t\treturn data[:, :, :3]\n\telif len(data.shape) == 2:\n\t\treturn data[:, :3]\n\n# To avoid samplenet.\nclass Identity(torch.nn.Module):\n\tdef forward(self, *input):\n\t\treturn input \t\t\t\t\t# return inputs as it is.\n\n# ICP registration module.\nclass ICP:\n\tdef __init__(self, threshold=0.1, max_iteration=10):\n\t\t# threshold: \t\t\tThreshold for correspondences. (scalar)\n\t\t# max_iterations:\t\tNumber of allowed iterations. (scalar)\n\t\tself.threshold = threshold\n\t\tself.criteria = o3d.registration.ICPConvergenceCriteria(max_iteration=max_iteration)\n\n\t# Preprocess template, source point clouds.\n\tdef preprocess(self, template, source):\n\t\tif self.is_tensor: template, source = template.detach().cpu().numpy(), source.detach().cpu().numpy()\t# Convert to ndarray if tensors.\n\n\t\tif len(template.shape) > 2: \t\t\t\t\t\t# Reduce dimension to [N, 3]\n\t\t\ttemplate, source = template[0], source[0]\n\n\t\t# Find mean of template & source.\n\t\tself.template_mean = np.mean(template, axis=0, keepdims=True)\n\t\tself.source_mean = np.mean(source, axis=0, keepdims=True)\n\t\t\n\t\t# Convert to open3d point clouds.\n\t\ttemplate_ = o3d.geometry.PointCloud()\n\t\tsource_ = o3d.geometry.PointCloud()\n\n\t\t# Subtract respective mean from each point cloud.\n\t\ttemplate_.points = o3d.utility.Vector3dVector(template - self.template_mean)\n\t\tsource_.points = o3d.utility.Vector3dVector(source - self.source_mean)\n\t\treturn template_, source_\n\n\t# Postprocess on transformation matrix.\n\tdef postprocess(self, res):\n\t\t# Way to deal with mean substraction\n\t\t# Pt = R*Ps + t \t\t\t\t\t\t\t\toriginal data (1)\n\t\t# Pt - Ptm = R'*[Ps - Psm] + t' \t\t\t\tmean subtracted from template and source.\n\t\t# Pt = R'*Ps + t' - R'*Psm + Ptm \t\t\t\trearrange the equation (2)\n\t\t# From eq. 1 and eq. 2,\n\t\t# R = R' \t&\tt = t' - R'*Psm + Ptm\t\t\t(3)\n\n\t\test_R = np.array(res.transformation[0:3, 0:3]) \t\t\t\t\t\t# ICP's rotation matrix (source -> template)\n\t\tt_ = np.array(res.transformation[0:3, 3]).reshape(1, -1)\t\t\t# ICP's translation vector (source -> template)\n\t\test_T = np.array(res.transformation)\t\t\t\t\t\t\t\t# ICP's transformation matrix (source -> template)\n\t\test_t = np.matmul(est_R, -self.source_mean.T).T + t_ + self.template_mean[0] \t# update predicted translation according to eq. 
3\n\t\test_T[0:3, 3] = est_t\n\t\treturn est_R, est_t, est_T\n\n\t# Convert result to pytorch tensors.\n\t@staticmethod\n\tdef convert2tensor(result):\n\t\tif torch.cuda.is_available(): device = 'cuda'\n\t\telse: device = 'cpu'\n\t\tresult['est_R']=torch.tensor(result['est_R']).to(device).float().view(-1, 3, 3) \t\t# Rotation matrix [B, 3, 3] (source -> template)\n\t\tresult['est_t']=torch.tensor(result['est_t']).to(device).float().view(-1, 1, 3)\t\t\t# Translation vector [B, 1, 3] (source -> template)\n\t\tresult['est_T']=torch.tensor(result['est_T']).to(device).float().view(-1, 4, 4)\t\t\t# Transformation matrix [B, 4, 4] (source -> template)\n\t\treturn result\n\n\t# icp registration.\n\tdef __call__(self, template, source):\n\t\tself.is_tensor = torch.is_tensor(template)\n\n\t\ttemplate, source = self.preprocess(template, source)\n\t\tres = o3d.registration.registration_icp(source, template, self.threshold, criteria=self.criteria)\t# icp registration in open3d.\n\t\test_R, est_t, est_T = self.postprocess(res)\n\t\t\n\t\tresult = {'est_R': est_R,\n\t\t\t\t 'est_t': est_t,\n\t\t\t\t 'est_T': est_T}\n\t\tif self.is_tensor: result = self.convert2tensor(result)\n\t\treturn result\n\ndef pc2open3d(data):\n\tif torch.is_tensor(data): data = data.detach().cpu().numpy()\n\tif len(data.shape) == 2:\n\t\tpc = o3d.geometry.PointCloud()\n\t\tpc.points = o3d.utility.Vector3dVector(data)\n\t\treturn pc\n\telse:\n\t\tprint(\"Error in the shape of data given to Open3D!, Shape is \", data.shape)\n\n# Visualize data points.\ndef visualize_result(template, source, est_T):\n\t# template, source:\t\t\tPoint Clouds [N, 3] (ndarray)\n\t# est_T: \t\t\t\t\tPredicted Transformation [4, 4] (ndarray) (source -> template)\n\n\ttemplate, source, est_T = template[0], source[0], est_T[0]\n\ttransformed_source = np.matmul(est_T[0:3, 0:3], source.T).T + est_T[0:3, 3] \t\t# Rotate template as per inverse ground truth est_T.\n\t\n\t# Allocate points to each open3d point cloud.\n\ttemplate = pc2open3d(template)\n\tsource = pc2open3d(source)\n\ttransformed_source = pc2open3d(transformed_source)\n\t\n\t# Apply color to each point cloud.\n\ttemplate.paint_uniform_color([1, 0, 0])\n\tsource.paint_uniform_color([0, 1, 0])\n\ttransformed_source.paint_uniform_color([0, 0, 1])\n\n\t# Display point clouds.\n\to3d.visualization.draw_geometries([template, source, transformed_source])\n\n# Find error metrics.\ndef find_errors(gt_R, pred_R, gt_t, pred_t):\n\t# gt_R:\t\t\t\tRotation matrix [3, 3] (source = gt_R * template)\n\t# pred_R: \t\t\tRegistration algorithm's rotation matrix [3, 3] (template = pred_R * source)\n\t# gt_t:\t\t\t\ttranslation vector [1, 3] (source = template + gt_t)\n\t# pred_t: \t\t\tRegistration algorithm's translation matrix [1, 3] (template = source + pred_t)\n\n # Euler distance between ground truth translation and predicted translation.\n gt_t = -np.matmul(gt_R.T, gt_t) \t\t\t\t\t\t\t\t\t# gt translation vector (source -> template)\n translation_error = np.sqrt(np.sum(np.square(gt_t - pred_t)))\n\n # Convert matrix remains to axis angle representation and report the angle as rotation error.\n error_mat = np.dot(gt_R, pred_R)\t\t\t\t\t\t\t# matrix remains [3, 3]\n _, angle = transforms3d.axangles.mat2axangle(error_mat)\n return translation_error, abs(angle*(180/np.pi))\n\n# Evaluate metrics.\ndef evaluate_results(template, source, est_T, igt):\n\t# template, source: \t\tPoint Cloud [B, N, 3] (torch tensor)\n\t# est_T:\t\t\t\t\tPredicted transformation [B, 4, 4] (torch tensor) (template = est_T * source)\n\t# 
igt: \t\t\t\t\t\tGround truth transformation [B, 4, 4] (torch tensor) (source = igt * template)\n\n\ttransformed_source = torch.bmm(est_T[:, 0:3, 0:3], source.permute(0, 2, 1)).permute(0,2,1) + est_T[:, 0:3, 3]\n\ttry:\n\t\tcd_loss = ChamferDistanceLoss()(template, transformed_source).item()\t\t\t\t\t# Find chamfer distance between template and registered source.\n\texcept:\n\t\tcd_loss = 0.0\n\n\t# Find error metrices.\n\ttemplate, source, est_T, igt = template.detach().cpu().numpy()[0], source.detach().cpu().numpy()[0], est_T.detach().cpu().numpy()[0], igt.detach().cpu().numpy()[0]\n\ttranslation_error, rotation_error = find_errors(igt[:3, :3], est_T[:3, :3], igt[:3, 3], est_T[:3, 3])\n\treturn translation_error, rotation_error, cd_loss\n\n# Register template and source pairs.\ndef register(template, source, model, reg_algorithm, args):\n\t# template, source: \t\tPoint Cloud [B, N, 3] (torch tensor)\n\t# model:\t\t\t\t\tObj of mask network.\n\t# reg_algorithm:\t\t\tObj of registration algorithm.\n\n\t# No need to use normals. Only use normals for RPM-Net.\n\tif not args.reg_algorithm == 'rpmnet':\n\t\ttemplate, source = pc2points(template), pc2points(source)\n\n\tif args.masknet:\n\t\tmasked_template, pred_mask = model(template, source)\n\t\tresult = reg_algorithm(masked_template, source)\n\telse:\n\t\ttemplate, source = model(template, source) \t\t\t\t\t# Identity class is used.\n\t\tresult = reg_algorithm(template, source)\n\treturn result\n\n# Test a dataset using given registration algorithm.\ndef test_one_epoch(args, model, reg_algorithm, test_loader):\n\t# args: \t\t\tParameters required for testing.\n\t# model:\t\t\tEither obj of mask network or Identity class.\n\t# reg_algorithm:\tObj of registration algorithm.\n\t# test_loader:\t\tObj of test data loading class.\n\n\tmodel.eval()\n\tdevice = args.device\n\ttest_loss = 0.0\n\tpred = 0.0\n\tcount = 0\n\tt_errors, r_errors, cd_losses = [], [], []\t\t\t# lists to store metrics.\n\ttimings = []\n\n\tfor i, data in enumerate(tqdm(test_loader)):\n\t\ttemplate, source, igt, gt_mask = data \n\n\t\ttemplate = template.to(device)\n\t\tsource = source.to(device)\n\t\tigt = igt.to(device)\n\t\tgt_mask = gt_mask.to(device)\n\n\t\tstart = time.time()\n\t\tresult = register(template, source, model, reg_algorithm, args)\n\t\telapsed_time = time.time() - start\n\n\t\test_T = result['est_T']\n\n\t\t# Function to view result of a data point.\n\t\t# visualize_result(template.detach().cpu().numpy(), source.detach().cpu().numpy(), est_T.detach().cpu().numpy())\n\n\t\tt_err, r_err, cd_loss = evaluate_results(pc2points(template), pc2points(source), est_T, igt)\n\t\t\n\t\t# Add metrics to list.\n\t\tt_errors.append(t_err)\n\t\tr_errors.append(r_err)\n\t\tcd_losses.append(cd_loss)\n\t\ttimings.append(elapsed_time)\n\n\t\tcount += 1\n\n\treturn t_errors, r_errors, cd_loss, timings\n\n# Store mean and std. dev. 
in a text file.\ndef save_stats(t_errors, r_errors, cd_loss, timings, root_dir, group_name=''):\n\t# t_errors, r_errors, cd_loss, timings:\t\ttranslation err, rotation err, chamfer distances, elapsed time (list)\n\t# root_dir:\t\t\t\t\t\t\t\t\tPath of directory to store results.\n\t# group_name:\t\t\t\t\t\t\t\tName of the group in h5py file.\n\n\t# Compute mean and std of all metrics.\n\tr_mean, t_mean, cd_mean, time_mean = np.mean(r_errors), np.mean(t_errors), np.mean(cd_loss), np.mean(timings)\n\tr_std, t_std, cd_std, time_std = np.std(r_errors), np.std(t_errors), np.std(cd_loss), np.std(timings)\n\t\n\tfile = open(os.path.join(root_dir, 'results.txt'), 'a')\t\t\t# Append to a text file.\n\n\t# Store mean of results.\n\ttext = 'Mean Rotation Error: {}\\nMean Translation Error: {}\\nMean CD Loss: {}\\nMean Time: {} in sec.'.format(r_mean, t_mean, cd_mean, time_mean)\n\tif group_name: text = '\\n\\n' + group_name + '\\n' + text \t\t\t# for multiple results with various angles.\n\tfile.write(text)\n\t\n\t# Store std of results.\n\ttext = '\\n\\nStd. Dev. Rotation Error: {}\\nStd. Dev. Translation Error: {}\\nStd. Dev. CD Loss: {}\\nStd. Dev. Time: {} in sec.'.format(r_std, t_std, cd_std, time_std)\n\tfile.write(text)\n\tfile.close()\n\n# Store all metrics as lists in h5py file.\ndef save_results(t_errors, r_errors, cd_loss, timings, name, group_name=''):\n\t# t_errors, r_errors, cd_loss, timings:\t\ttranslation err, rotation err, chamfer distances, elapsed time (list)\n\t# name:\t\t\t\t\t\t\t\t\t\tName of the directory to store results.\n\t# group_name:\t\t\t\t\t\t\t\tName of the group in h5py file.\n\n\troot_dir = os.path.join(os.getcwd(), name)\n\tif not os.path.exists(root_dir): os.mkdir(root_dir)\n\n\timport h5py\n\tfile = h5py.File(os.path.join(root_dir, 'results.h5'), 'a')\t\t# Create h5py file.\n\n\tif group_name: group = file.create_group(group_name) \t\t\t# Create group for multiple results with misalignment levels.\n\telse: group = file\n\t\n\t# Store metrics either in group or the dataset.\n\tgroup.create_dataset('rotation_error', data=np.array(r_errors))\n\tgroup.create_dataset('translation_error', data=np.array(t_errors))\n\tgroup.create_dataset('cd_loss', data=np.array(cd_loss))\n\tgroup.create_dataset('timings', data=np.array(timings))\n\tfile.close()\n\n\tsave_stats(t_errors, r_errors, cd_loss, timings, root_dir, group_name=group_name)\n\t\n# Test a particular algorithm either with mask network or without it.\ndef test(args, model, reg_algorithm, test_loader):\n\tt_errors, r_errors, cd_loss, timings = test_one_epoch(args, model, reg_algorithm, test_loader)\n\tsave_results(t_errors, r_errors, cd_loss, timings, args.results_dir, group_name=args.group_name)\n\ndef options():\n\tparser = argparse.ArgumentParser(description='MaskNet: A Fully-Convolutional Network For Inlier Estimation (Statistical Evaluation)')\n\tparser.add_argument('--dataset_path', type=str, default='testdata.h5',\n\t\t\t\t\t\tmetavar='PATH', help='path to the input dataset') # like '/path/to/ModelNet40'\n\tparser.add_argument('--eval', type=bool, default=False, help='Train or Evaluate the network.')\n\tparser.add_argument('--kitti', type=bool, default=False, help='Train or Evaluate the network with KittiData.')\n\n\t# settings for testing\n\tparser.add_argument('-b', '--batch_size', default=1, type=int,\n\t\t\t\t\t\tmetavar='N', help='mini-batch size (default: 1)')\n\tparser.add_argument('--device', default='cuda:0', type=str,\n\t\t\t\t\t\tmetavar='DEVICE', help='use CUDA if available')\n\n\t# algorithm for 
testing\n\tparser.add_argument('--masknet', default='True', type=str)\n\tparser.add_argument('--reg_algorithm', default='pointnetlk', type=str, choices=['pointnetlk', 'icp', 'dcp', 'prnet', 'pcrnet', 'rpmnet'])\n\n\tparser.add_argument('--pretrained_pnlk', default='../pretrained/exp_pointnetlk/models/no_noise_pointlk.pth', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\tparser.add_argument('--pretrained_dcp', default='../pretrained/exp_dcp/models/best_model.t7', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\tparser.add_argument('--pretrained_prnet', default='../pretrained/exp_prnet/models/best_model.t7', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\tparser.add_argument('--pretrained_pcrnet', default='../pretrained/exp_ipcrnet/models/best_model.t7', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\tparser.add_argument('--pretrained_rpmnet', default='../pretrained/exp_rpmnet/models/partial-trained.pth', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\tparser.add_argument('--pretrained', default='../pretrained/model_masknet_ModelNet40.t7', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to pretrained model file (default: null (no-use))')\n\n\t# results\n\tparser.add_argument('--results_dir', default='results_samplenet_pnlk', type=str,\n\t\t\t\t\t\tmetavar='PATH', help='path to store results')\n\tparser.add_argument('--group_name', default='', type=str,\n\t\t\t\t\t\tmetavar='NAME', help='name of the group in the results h5py file')\n\n\targs = parser.parse_args()\n\tif args.masknet == 'False': args.masknet = False\n\tif args.masknet == 'True': args.masknet = True\n\treturn args\n\ndef main():\n\targs = options()\n\t\n\ttestset = TestDataLoader(args.dataset_path, args.group_name)\n\ttest_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=1)\n\n\tif not torch.cuda.is_available():\n\t\targs.device = 'cpu'\n\targs.device = torch.device(args.device)\n\n\t# Define Registration Algorithm.\n\tif args.reg_algorithm == 'pointnetlk':\n\t\tpnlk = PointNetLK()\n\t\tif args.pretrained_pnlk:\n\t\t\tassert os.path.isfile(args.pretrained_pnlk)\n\t\t\tpnlk.load_state_dict(torch.load(args.pretrained_pnlk, map_location='cpu'))\n\t\t\tprint(\"PointNetLK pretrained model loaded successfully!\")\n\t\tpnlk = pnlk.to(args.device)\n\t\treg_algorithm = pnlk\n\n\telif args.reg_algorithm == 'icp':\n\t\treg_algorithm = ICP()\n\n\telif args.reg_algorithm == 'dcp':\n\t\tdgcnn = DGCNN(emb_dims=512)\n\t\tmodel = DCP(feature_model=dgcnn, cycle=True)\n\t\tmodel = model.to(args.device)\n\t\tif args.pretrained_dcp:\n\t\t\tassert os.path.isfile(args.pretrained_dcp)\n\t\t\tmodel.load_state_dict(torch.load(args.pretrained_dcp), strict=False)\n\t\t\tprint(\"DCP pretrained model loaded successfully!\")\n\t\treg_algorithm = model\n\n\telif args.reg_algorithm == 'prnet':\n\t\tmodel = PRNet()\n\t\tif args.pretrained_prnet:\n\t\t\tassert os.path.isfile(args.pretrained_prnet)\n\t\t\tmodel.load_state_dict(torch.load(args.pretrained_prnet, map_location='cpu'))\n\t\t\tprint(\"PRNet pretrained model loaded successfully!\")\n\t\tmodel = model.to(args.device)\n\t\tmodel.eval()\n\t\treg_algorithm = model\n\n\telif args.reg_algorithm == 'pcrnet':\n\t\tptnet = PointNet(emb_dims=1024)\n\t\tmodel = iPCRNet(feature_model=ptnet)\n\t\tmodel = 
model.to(args.device)\n\t\tif args.pretrained_pcrnet:\n\t\t\tmodel.load_state_dict(torch.load(args.pretrained_pcrnet, map_location='cpu'))\n\t\t\tprint(\"PCRNet pretrained model loaded successfully!\")\n\t\treg_algorithm = model\n\n\telif args.reg_algorithm == 'rpmnet':\n\t\tmodel = RPMNet()\n\t\tmodel = model.to(args.device)\n\t\tif args.pretrained_rpmnet:\n\t\t\tmodel.load_state_dict(torch.load(args.pretrained_rpmnet, map_location='cpu')['state_dict'])\n\t\t\tprint(\"RPMNet pretrained model loaded successfully!\")\n\t\treg_algorithm = model\t\t\t\n\n\t# Define mask network.\n\tif args.masknet:\n\t\tmodel = MaskNet()\n\t\tmodel = model.to(args.device)\n\t\tif args.pretrained:\n\t\t\tassert os.path.isfile(args.pretrained)\n\t\t\tmodel.load_state_dict(torch.load(args.pretrained, map_location='cpu'))\n\t\t\tprint(\"MaskNet pretrained model loaded successfully!\")\n\t\tmodel.to(args.device)\n\telse:\n\t\tmodel = Identity()\n\n\ttest(args, model, reg_algorithm, test_loader)\n\nif __name__ == '__main__':\n\tmain()"
] | [
[
"numpy.square",
"numpy.dot",
"torch.load",
"torch.utils.data.DataLoader",
"torch.is_tensor",
"numpy.matmul",
"torch.tensor",
"numpy.std",
"numpy.mean",
"torch.cuda.is_available",
"torch.device",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JinsuRhee/TreeFrog | [
"ce876d78564f39de75df4bb65364c31a8c2c8fd0"
] | [
"examples/example_produce_walkabletree.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 16 11:05:33 2017\n\nExample of loading a raw tree, building the head/tail, root head/ root tail info writing a walkable tree.\n@author: Pascal Jahan Elahi\n\n\"\"\"\n\nimport sys\nimport os\nimport glob\nimport time\nimport numpy as np\nimport copy\nimport h5py\n\n#load python routines\nscriptpath=os.path.abspath(__file__)\nbasecodedir=scriptpath.split('examples/')[0]+'/tools/'\nsys.path.append(basecodedir)\n#load the cythonized code if compiled\nif (len(glob.glob(basecodedir+'velociraptor_python_tools_cython.*.so'))==1):\n print('using cython VR+TF toolkit')\n import velociraptor_python_tools_cython as vpt\nelse:\n print('using python VR+TF toolkit')\n import velociraptor_python_tools as vpt\n\n#base raw tree file name to load the raw tree if necessary\nbasetreefname=sys.argv[1]\n\n#director that contains the halo catalogs\nhalocatalogdir=sys.argv[2]\n\n#file name for the simplified tree file\noutputfname=sys.argv[3]\n\n#requested halo fields\nrequestedfields=[\n 'ID', 'hostHaloID',\n ]\n\n# define different types of input\nASCIIINPUT=0\nHDFINPUT=2\n\n#here we can easily get the version of TF\nTFVERSION = np.loadtxt(basecodedir+'../VERSION')\n#halo finder would need some updates\nHFNAME = 'VELOCIraptor'\nHFVERSION = 1.50\n\n# could alter to have user indicate input type but currently assume all is HDF\nRAWTREEFORMAT=HDFINPUT\nRAWPROPFORMAT=HDFINPUT\n\n# load the tree information stored in the file\n# such as temporal halo id, number of snapshots searched when producing the tree\nif (RAWTREEFORMAT == HDFINPUT):\n hfile = h5py.File(basetreefname+'.snapshot_000.VELOCIraptor.tree')\n numsnaps = hfile.attrs['Number_of_snapshots']\n TEMPORALHALOIDVAL = hfile.attrs['Temporal_halo_id_value']\n NSNAPSEARCH = hfile.attrs['Nsteps_search_new_links']\n TREEDIRECTION = hfile.attrs['Tree_direction']\n hfile.close()\n#if ascii, output of tree frog can be parsed\nelse :\n #currently the ascii file does not store the temporal halo id but this will be updated\n treefile = open(basetreename, 'r')\n numsnaps = np.int32(treefile.readline())\n description = treefile.readline()\n treedirectionstring = description.split('Produce tree in direction ').split(' |')[0]\n if (treedirectionstring == 'progenitors'):\n TREEDIRECTION = 0\n elif (treedirectionstring == 'descendants'):\n TREEDIRECTION = 1\n NSNAPSEARCH = np.int32(description.split('Tree built using ').split(' temporal steps')[0])\n TEMPORALHALOIDVAL = 1000000000000\n\n# number of particles used in the halo catalog\n# this can also be extracted from the halo catalog files directly, specifically\n# the configuration files\nNPARTTHRESHOLD=20\n\nrawtreedata = None\n#read raw descendant tree along with merits, don't reverse snap order,\nif (RAWTREEFORMAT == HDFINPUT):\n #for hdf input produce file listing input files\n snaptreelist=open(basetreefname+'.snaptreelist.txt','w')\n for i in range(numsnaps):\n snaptreelist.write(basetreefname+'.snapshot_%03d.VELOCIraptor\\n'%i)\n snaptreelist.close()\n fname = basetreefname+'.snaptreelist.txt'\nelse:\n fname = basetreename\n\nif (TREEDIRECTION == 1):\n rawtreedata=vpt.ReadHaloMergerTreeDescendant(fname, False, RAWTREEFORMAT, 1, True)\nelif (TREEDIRECTION == 0):\n print('Warning, progenitor based trees are less useful when building halo merger trees for SAMs.')\n rawtreedata=vpt.ReadHaloMergerTree(fname, RAWTREEFORMAT, 1, True)\nelse:\n print('Full graphs to walkable trees not implemented yet.')\n\nprint('Finished reading raw 
tree')\nnumhalos=np.zeros(numsnaps,dtype=np.uint64)\nhalodata=[dict() for i in range(numsnaps)]\natime=np.zeros(numsnaps)\nfor i in range(numsnaps):\n    halodata[i],numhalos[i]=vpt.ReadPropertyFile(halocatalogdir+'/snapshot_%03d.VELOCIraptor'%i, 2, 0, 1, requestedfields)\n    atime[i]=halodata[i]['SimulationInfo']['ScaleFactor']\n    for key in halodata[i].keys():\n        if (key == 'SimulationInfo' or key == 'UnitInfo' or key == \"ConfigurationInfo\"): continue\n        if (halodata[i][key].dtype==np.float64):\n            halodata[i][key] = np.array(halodata[i][key],dtype=np.float32)\nprint('Finished reading halo properties')\n\n#produce head tail in ascending order\nstart=time.perf_counter()\nprint(\"Building head/tail \")\nvpt.BuildTemporalHeadTailDescendant(numsnaps, rawtreedata, numhalos, halodata,\n                                    TEMPORALHALOIDVAL)\nprint(\"Finished head/tail \", time.perf_counter()-start)\n\n#store the description\nDescriptionInfo={\n    'Title':'Walkable Tree',\n    'TreeBuilder' : {\n        'Name' : 'TreeFrog',\n        'Version' : TFVERSION,\n        'Temporal_linking_length' : NSNAPSEARCH,\n        'Temporal_halo_id_value' : TEMPORALHALOIDVAL,\n        'Tree_direction' : TREEDIRECTION,\n        },\n    'HaloFinder' : {\n        'Name' : HFNAME, 'Version' : HFVERSION,\n        'Particle_num_threshold' : NPARTTHRESHOLD,\n        },\n    }\n\nvpt.WriteWalkableHDFTree(outputfname, numsnaps, rawtreedata, numhalos, halodata,\n                         atime, DescriptionInfo)\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spokestack/spokestack-python | [
"95e451f9ab6ab1af2370d3e1007ebf6739e4765f"
] | [
"tests/tts/test_tts_client.py"
] | [
"\"\"\"\nThis module contains the tests for the TTSClient class\n\"\"\"\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom requests import Response\n\nfrom spokestack.tts.clients.spokestack import TextToSpeechClient, TTSError\n\n\ndef test_graphql():\n client = TextToSpeechClient(\"\", \"\", \"\")\n voice = \"voice\"\n profile = \"test\"\n\n for mode in [\"text\", \"ssml\", \"markdown\"]:\n method = f\"synthesize{mode[0].upper()}{mode[1:]}\"\n body = client._build_body(\"test\", mode=mode, voice=voice, profile=profile)\n assert f\"{method}(\" in body\n assert profile.upper() in body\n\n\ndef test_synthesize_text():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n test = np.ones(160).tobytes()\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n mock_iterable = mock.MagicMock(\n spec=Response().iter_content(), return_value=test\n )\n patched.post.return_value = MockResponse(status_code=200)\n patched.get.return_value = mock.Mock(\n iter_content=mock_iterable, status_code=200\n )\n response = client.synthesize(\"test utterance\")\n assert response == test\n response = client.synthesize(\"test utterance\", profile=\"alexa\")\n assert response == test\n\n\ndef test_synthesize_ssml():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n test = np.ones(160).tobytes()\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n mock_iterable = mock.MagicMock(\n spec=Response().iter_content(), return_value=test\n )\n patched.post.return_value = MockResponse(status_code=200)\n patched.get.return_value = mock.Mock(\n iter_content=mock_iterable, status_code=200\n )\n response = client.synthesize(\"<speak> test utterance </speak>\", mode=\"ssml\")\n assert response == test\n\n\ndef test_synthesize_markdown():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n test = np.ones(160).tobytes()\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n mock_iterable = mock.MagicMock(\n spec=Response().iter_content(), return_value=test\n )\n patched.post.return_value = MockResponse(status_code=200)\n patched.get.return_value = mock.Mock(\n iter_content=mock_iterable, status_code=200\n )\n response = client.synthesize(\"# test utterance\", mode=\"markdown\")\n assert response == test\n\n\ndef test_synthesize_url():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n mock_url = \"https://test\"\n patched.post.return_value = MockResponse(\n status_code=200,\n return_value={\"data\": {\"synthesizeText\": {\"url\": mock_url}}},\n )\n response = client.synthesize_url(\"# test utterance\", mode=\"text\")\n assert response == mock_url\n\n\ndef test_synthesize_invalid_mode():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n test = np.ones(160).tobytes()\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n patched.get.return_value = mock.Mock(content=test)\n with pytest.raises(ValueError):\n _ = client.synthesize(\"test utterance\", mode=\"python\")\n\n\ndef test_error_response():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n patched.post.return_value = MockResponse(\n status_code=200,\n return_value={\n \"data\": {\"synthesizeSSML\": None},\n \"errors\": [\n {\n \"locations\": [{\"column\": 0, \"line\": 3}],\n \"message\": \"synthesis_failed\",\n \"path\": [\"synthesizeSSML\"],\n }\n ],\n },\n )\n\n with pytest.raises(TTSError):\n _ = 
client.synthesize(\"utterance\")\n\n\ndef test_post_http_error():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n patched.post.return_value = MockResponse(status_code=201)\n with pytest.raises(Exception):\n _ = client.synthesize(\"utterance\")\n\n\ndef test_get_http_error():\n client = TextToSpeechClient(\"\", \"\", \"\")\n\n with mock.patch(\"spokestack.tts.clients.spokestack.requests\") as patched:\n patched.post.return_value = MockResponse(status_code=200)\n patched.get.return_value = MockResponse(status_code=201)\n with pytest.raises(Exception):\n _ = client.synthesize(\"utterance\")\n\n\nclass MockResponse(mock.MagicMock):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def json(self):\n return self.return_value\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
odavalos/ACTINN-PyTorch | [
"1bc8b127968c2aec9e3d52c949f923434a4d189a"
] | [
"ACTINN/utils.py"
] | [
"# std libs\nimport os\nimport numpy as np\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import classification_report as class_rep\n\n\n# torch libs\nimport torch\n\n\ndef load_model(model, pretrained_path):\n \"\"\"\n Loading pre-trained weights of a model\n INPUTS:\n model -> a pytorch model which will be updated with the pre-trained weights\n pretrained_path -> path to where the .pth file is saved\n\n RETURN:\n the updated model\n \"\"\"\n weights = torch.load(pretrained_path)\n pretrained_dict = weights['Saved_Model'].state_dict()\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n\n\ndef save_checkpoint_classifier(model, epoch, iteration, prefix=\"\", dir_path = None):\n \"\"\"\n Saving pre-trained model for inference\n\n INPUTS:\n model-> PT model which we want to save\n epoch-> the current epoch number (will be used in the filename)\n iteration -> current iteration (will be used in the filename)\n prefix (optional)-> a prefix to the filename\n dir_path (optional)-> path to save the pre-trained model\n\n \"\"\"\n\n if not dir_path:\n dir_path = \"./ClassifierWeights/\"\n\n model_out_path = dir_path + prefix +f\"model_epoch_{epoch}_iter_{iteration}.pth\"\n state = {\"epoch\": epoch ,\"Saved_Model\": model}\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n torch.save(state, model_out_path)\n print(f\"Classifier Checkpoint saved to {model_out_path}\")\n\n \ndef evaluate_classifier(valid_data_loader, cf_model, \n classification_report:bool = False,\n device=None):\n \"\"\"\n Evaluating the performance of the network on validation/test dataset\n\n INPUTS:\n valid_data_loader-> a dataloader of the validation or test dataset\n cf_model-> the model which we want to use for validation\n classification_report -> if you want to enable classification report\n device-> if you want to run the evaluation on a specific device\n\n RETURN:\n None\n\n \"\"\"\n ##### This could be a bug if a user has GPUs but it not using them!\n\n if not device:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n print(\"==> Evaluating on Validation Set:\")\n total = 0;\n correct = 0;\n # for sklearn metrics\n y_true = np. array([])\n y_pred = np. array([])\n with torch.no_grad():\n for sample in valid_data_loader:\n data, labels = sample;\n data = data.to(device)\n labels = labels.to(device)\n outputs = cf_model(data)\n _, predicted = torch.max(outputs.squeeze(), 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n # get all the labels for true and pred so we could use them in sklearn metrics\n y_true = np.append(y_true,labels.detach().cpu().numpy())\n y_pred = np.append(y_pred,predicted.detach().cpu().numpy())\n \n print(f' -> Accuracy of classifier network on validation set: {(100 * correct / total):4.4f} %' )\n # calculating the precision/recall based multi-label F1 score\n macro_score = f1_score(y_true, y_pred, average = 'macro' )\n w_score = f1_score(y_true, y_pred,average = 'weighted' )\n print(f' -> Non-Weighted F1 Score on validation set: {macro_score:4.4f} ' )\n print(f' -> Weighted F1 Score on validation set: {w_score:4.4f} ' )\n if classification_report:\n print(class_rep(y_true,y_pred))"
] | [
[
"torch.load",
"torch.no_grad",
"torch.cuda.is_available",
"sklearn.metrics.f1_score",
"numpy.array",
"sklearn.metrics.classification_report",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vivarium-collective/vivarium-cell | [
"c504704a63ee8211f5f11e7fe486287dbc7553c3"
] | [
"vivarium_cell/library/lattice_utils.py"
] | [
"'''\n==================================\nUtilities for Lattice Environments\n==================================\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom scipy import constants\n\nfrom vivarium.library.units import units, Quantity\n\n\nAVOGADRO = constants.N_A / units.mol\n\n\ndef get_bin_site(location, n_bins, bounds):\n '''Get a bin's indices in the lattice\n\n Parameters:\n location (list): A list of 2 floats that specify the x and y\n coordinates of a point inside the desired bin.\n n_bins (list): A list of 2 ints that specify the number of bins\n along the x and y axes, respectively.\n bounds (list): A list of 2 floats that define the dimensions of\n the lattice environment along the x and y axes,\n respectively.\n\n Returns:\n tuple: A 2-tuple of the x and y indices of the bin in the\n lattice.\n '''\n bin_site_no_rounding = np.array([\n location[0] * n_bins[0] / bounds[0],\n location[1] * n_bins[1] / bounds[1]\n ])\n bin_site = tuple(\n np.floor(bin_site_no_rounding).astype(int) % n_bins)\n return bin_site\n\n\ndef get_bin_volume(n_bins, bounds, depth):\n '''Get a bin's volume\n\n Parameters:\n n_bins (list): A list of 2 ints that specify the number of bins\n along the x and y axes, respectively.\n bounds (list): A list of 2 floats that specify the lengths of\n the environment's sides in the x and y directions,\n respectively. In units of microns.\n depth (float): The depth of the environment, in microns.\n\n Returns:\n float: The volume of each bin in the lattice, in Liters.\n '''\n total_volume = (depth * bounds[0] * bounds[1]) * 1e-15 # (L)\n return total_volume / (n_bins[0] * n_bins[1])\n\n\ndef count_to_concentration(count, bin_volume):\n '''Convert a molecule count into a concentration.\n\n Parameters should all have units. Returned value will have units.\n\n Parameters:\n count (int): The number of molecules in the bin.\n bin_volume (float): The volume of the bin.\n\n Returns:\n float: The concentration of molecule in the bin.\n '''\n return count / (bin_volume * AVOGADRO)\n"
] | [
[
"numpy.array",
"numpy.floor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arv1997/Mask_RCNN | [
"648eb10c57427d61704a0be8d50c9e77d7c0c2ce"
] | [
"mrcnn/visualize_cv.py"
] | [
"import cv2\nimport numpy as np\n\n\ndef random_colors(N):\n np.random.seed(1)\n colors = [tuple(255 * np.random.rand(3)) for _ in range(N)]\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"apply mask to image\"\"\"\n for n, c in enumerate(color):\n image[:, :, n] = np.where(\n mask == 1,\n image[:, :, n] * (1 - alpha) + alpha * c,\n image[:, :, n]\n )\n return image\n\n\ndef display_instances(image, boxes, masks, ids, names, scores):\n \"\"\"\n take the image and results and apply the mask, box, and Label\n \"\"\"\n n_instances = boxes.shape[0]\n colors = random_colors(n_instances)\n\n if not n_instances:\n print('NO INSTANCES TO DISPLAY')\n else:\n assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]\n\n for i, color in enumerate(colors):\n if not np.any(boxes[i]):\n continue\n\n y1, x1, y2, x2 = boxes[i]\n label = names[ids[i]]\n score = scores[i] if scores is not None else None\n caption = '{} {:.2f}'.format(label, score) if score else label\n mask = masks[:, :, i]\n\n image = apply_mask(image, mask, color)\n image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)\n image = cv2.putText(\n image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2\n )\n\n return image\n\n\nif __name__ == '__main__':\n \"\"\"\n test everything\n \"\"\"\n import os\n import sys\n import coco\n import utils\n import model as modellib\n\n ROOT_DIR = os.getcwd()\n MODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n COCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n if not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n class InferenceConfig(coco.CocoConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n config = InferenceConfig()\n config.display()\n\n model = modellib.MaskRCNN(\n mode=\"inference\", model_dir=MODEL_DIR, config=config\n )\n model.load_weights(COCO_MODEL_PATH, by_name=True)\n class_names = [\n 'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light',\n 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',\n 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',\n 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush'\n ]\n\n capture = cv2.VideoCapture(0)\n\n # these 2 lines can be removed if you dont have a 1080p camera.\n capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n\n while True:\n ret, frame = capture.read()\n results = model.detect([frame], verbose=0)\n r = results[0]\n frame = display_instances(\n frame, r['rois'], r['masks'], r['class_ids'], class_names, r['scores']\n )\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n capture.release()\n cv2.destroyAllWindows()\n"
] | [
[
"numpy.random.rand",
"numpy.where",
"numpy.any",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gperkz12/PhysioNet_2020 | [
"953651c5354ee698302ba65a3cc55ca98b77e0ae"
] | [
"data_features.py"
] | [
"import numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport scipy.io as sio\nimport pickle as pk\nfrom scipy.signal import butter, lfilter\nfrom scipy import stats\n\n\ndef detect_peaks(ecg_measurements,signal_frequency,gain):\n\n \"\"\"\n Method responsible for extracting peaks from loaded ECG measurements data through measurements processing.\n\n This implementation of a QRS Complex Detector is by no means a certified medical tool and should not be used in health monitoring.\n It was created and used for experimental purposes in psychophysiology and psychology.\n You can find more information in module documentation:\n https://github.com/c-labpl/qrs_detector\n If you use these modules in a research project, please consider citing it:\n https://zenodo.org/record/583770\n If you use these modules in any other project, please refer to MIT open-source license.\n\n If you have any question on the implementation, please refer to:\n\n Michal Sznajder (Jagiellonian University) - technical contact ([email protected])\n Marta lukowska (Jagiellonian University)\n Janko Slavic peak detection algorithm and implementation.\n https://github.com/c-labpl/qrs_detector\n https://github.com/jankoslavic/py-tools/tree/master/findpeaks\n\n MIT License\n Copyright (c) 2017 Michal Sznajder, Marta Lukowska\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n \"\"\"\n\n\n filter_lowcut = 0.001\n filter_highcut = 15.0\n filter_order = 1\n integration_window = 30 # Change proportionally when adjusting frequency (in samples).\n findpeaks_limit = 0.35\n findpeaks_spacing = 100 # Change proportionally when adjusting frequency (in samples).\n refractory_period = 240 # Change proportionally when adjusting frequency (in samples).\n qrs_peak_filtering_factor = 0.125\n noise_peak_filtering_factor = 0.125\n qrs_noise_diff_weight = 0.25\n\n\n # Detection results.\n qrs_peaks_indices = np.array([], dtype=int)\n noise_peaks_indices = np.array([], dtype=int)\n\n\n # Measurements filtering - 0-15 Hz band pass filter.\n filtered_ecg_measurements = bandpass_filter(ecg_measurements, lowcut=filter_lowcut, highcut=filter_highcut, signal_freq=signal_frequency, filter_order=filter_order)\n\n filtered_ecg_measurements[:5] = filtered_ecg_measurements[5]\n\n # Derivative - provides QRS slope information.\n differentiated_ecg_measurements = np.ediff1d(filtered_ecg_measurements)\n\n # Squaring - intensifies values received in derivative.\n squared_ecg_measurements = differentiated_ecg_measurements ** 2\n\n # Moving-window integration.\n integrated_ecg_measurements = np.convolve(squared_ecg_measurements, np.ones(integration_window)/integration_window)\n\n # Fiducial mark - peak detection on integrated measurements.\n detected_peaks_indices = findpeaks(data=integrated_ecg_measurements,\n limit=findpeaks_limit,\n spacing=findpeaks_spacing)\n\n detected_peaks_values = integrated_ecg_measurements[detected_peaks_indices]\n\n return detected_peaks_values,detected_peaks_indices\n\n\ndef bandpass_filter(data, lowcut, highcut, signal_freq, filter_order):\n \"\"\"\n Method responsible for creating and applying Butterworth filter.\n :param deque data: raw data\n :param float lowcut: filter lowcut frequency value\n :param float highcut: filter highcut frequency value\n :param int signal_freq: signal frequency in samples per second (Hz)\n :param int filter_order: filter order\n :return array: filtered data\n \"\"\"\n nyquist_freq = 0.5 * signal_freq\n low = lowcut / nyquist_freq\n high = highcut / nyquist_freq\n b, a = butter(filter_order, [low, high], btype=\"band\")\n y = lfilter(b, a, data)\n return y\n\ndef findpeaks(data, spacing=1, limit=None):\n \"\"\"\n Janko Slavic peak detection algorithm and implementation.\n https://github.com/jankoslavic/py-tools/tree/master/findpeaks\n Finds peaks in `data` which are of `spacing` width and >=`limit`.\n :param ndarray data: data\n :param float spacing: minimum spacing to the next peak (should be 1 or more)\n :param float limit: peaks should have value greater or equal\n :return array: detected peaks indexes array\n \"\"\"\n len = data.size\n x = np.zeros(len + 2 * spacing)\n x[:spacing] = data[0] - 1.e-6\n x[-spacing:] = data[-1] - 1.e-6\n x[spacing:spacing + len] = data\n peak_candidate = np.zeros(len)\n peak_candidate[:] = True\n for s in range(spacing):\n start = spacing - s - 1\n h_b = x[start: start + len] # before\n start = spacing\n h_c = x[start: start + len] # central\n start = spacing + s + 1\n h_a = x[start: start + len] # after\n peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))\n\n ind = np.argwhere(peak_candidate)\n ind = 
ind.reshape(ind.size)\n    if limit is not None:\n        ind = ind[data[ind] > limit]\n    return ind\n\n\n\ndef get_12ECG_features_r1(data, header_data):\n    \"\"\"Extract features from ecg data\"\"\"\n\n    tmp_hea = header_data[0].split(' ')\n    ptID = tmp_hea[0]\n    num_leads = int(tmp_hea[1])\n    sample_Fs = int(tmp_hea[2])\n    gain_lead = np.zeros(num_leads)\n\n    for ii in range(num_leads):\n        tmp_hea = header_data[ii+1].split(' ')\n        gain_lead[ii] = int(tmp_hea[2].split('/')[0])\n\n    # for testing, we included the mean age of 57 if the age is a NaN\n    # This value will change as more data is being released\n    for iline in header_data:\n        if iline.startswith('#Age'):\n            tmp_age = iline.split(': ')[1].strip()\n            age = int(tmp_age if tmp_age != 'NaN' else 57)\n        elif iline.startswith('#Sex'):\n            tmp_sex = iline.split(': ')[1]\n            if tmp_sex.strip()=='Female':\n                sex = 1\n            else:\n                sex = 0\n        elif iline.startswith('#Dx'):\n            label = iline.split(': ')[1].split(',')[0]\n\n\n\n# We are only using data from lead1\n    peaks,idx = detect_peaks(data[0],sample_Fs,gain_lead[0])\n\n# mean\n    mean_RR = np.mean(idx/sample_Fs*1000)\n    mean_Peaks = np.mean(peaks*gain_lead[0])\n\n# median\n    median_RR = np.median(idx/sample_Fs*1000)\n    median_Peaks = np.median(peaks*gain_lead[0])\n\n# standard deviation\n    std_RR = np.std(idx/sample_Fs*1000)\n    std_Peaks = np.std(peaks*gain_lead[0])\n\n# variance\n    var_RR = stats.tvar(idx/sample_Fs*1000)\n    var_Peaks = stats.tvar(peaks*gain_lead[0])\n\n# Skewness\n    skew_RR = stats.skew(idx/sample_Fs*1000)\n    skew_Peaks = stats.skew(peaks*gain_lead[0])\n\n# Kurtosis\n    kurt_RR = stats.kurtosis(idx/sample_Fs*1000)\n    kurt_Peaks = stats.kurtosis(peaks*gain_lead[0])\n\n# Add more features: PCA and Sparse Coding\n\n# PCA\n    # Load up X_test, pca and sc\n    # pca = pk.load(open(\"pca.pkl\", 'rb'))\n    # sc = pk.load(open(\"sc.pkl\", 'rb'))\n\n    # Implement the testing\n    # X_std_test = sc.transform(X_test)\n    # X_pca_test = pca.transform(X_std_test)\n\n\n\n    features = np.hstack([age,sex,mean_RR,mean_Peaks,median_RR,median_Peaks,std_RR,std_Peaks,var_RR,var_Peaks,skew_RR,skew_Peaks,kurt_RR,kurt_Peaks,])\n\n\n    return features\n"
] | [
[
"numpy.hstack",
"numpy.logical_and",
"numpy.ediff1d",
"scipy.stats.tvar",
"numpy.median",
"numpy.argwhere",
"numpy.ones",
"scipy.signal.butter",
"numpy.std",
"numpy.mean",
"scipy.stats.skew",
"scipy.stats.kurtosis",
"scipy.signal.lfilter",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
dustrider/python_ai | [
"16d9806a06ed0f4ba1fe638458caa37343482e0a"
] | [
"unsupervised_learning/est_cluster_silhouette_score/app.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\n\n# Load data from input file\nx = np.loadtxt('data_quality.txt', delimiter=',')\n# Initialize variables\nscores = []\nvalues = np.arange(2, 10)\n# Iterate through the defined range\nfor num_clusters in values:\n # Train the KMeans clustering model\n kmeans = KMeans(init='k-means++', n_clusters=num_clusters, n_init=10)\n kmeans.fit(x)\n score = metrics.silhouette_score(x, kmeans.labels_, metric='euclidean', sample_size=len(x))\n print(\"\\nNumber of clusters =\", num_clusters)\n print(\"Silhouette score =\", score)\n scores.append(score)\n\n# Plot silhouette scores\nplt.figure()\nplt.bar(values, scores, width=0.7, color='black', align='center')\nplt.title('Silhouette score vs number of clusters')\nplt.show()\n\n# Extract best score and optimal number of clusters\nnum_clusters = np.argmax(scores) + values[0]\nprint('\\nOptimal number of clusters =', num_clusters)\n\n# Plot data\nplt.figure()\nplt.scatter(x[:, 0], x[:, 1], color='black', s=80, marker='o', facecolors='none')\nx_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1\ny_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1\nplt.title('Input data')\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\nplt.xticks(())\nplt.yticks(())\nplt.show()\n"
] | [
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"numpy.argmax",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KendallPark/buster | [
"27d996b383cc2e0ccfcc8d237aa0fdb8a2499400"
] | [
"buster/sampler.py"
] | [
"from typing import Callable\n\nfrom IPython import embed\n\nimport pandas as pd\nimport numpy.typing as npt\n\nfrom buster.metrics import gowers\nfrom skopt import sampler, optimizer\n\nimport skopt.space as sp\n\nfrom typing import Optional, Text, Union\n\nfrom sklearn.utils import check_random_state\n\nfrom sklearn import neighbors\n\nimport numpy as np\n\n\n# TODO: refactor to depend on a mixin--not a subclass of optimizers\nclass AdaptiveSampler(optimizer.Optimizer):\n\n def __init__(self,\n dimensions,\n base_estimator=\"dummy\",\n n_random_starts=None,\n n_initial_points=None,\n initial_point_generator=\"lhs\",\n n_jobs=1,\n acq_func=\"gp_hedge\",\n acq_optimizer=\"auto\",\n random_state=None,\n model_queue_size=None,\n acq_func_kwargs=None,\n acq_optimizer_kwargs=None):\n\n if n_initial_points is None:\n n_initial_points = len(dimensions) * 10\n\n super().__init__(dimensions,\n base_estimator=base_estimator,\n n_random_starts=n_random_starts,\n n_initial_points=n_initial_points,\n initial_point_generator=initial_point_generator,\n n_jobs=n_jobs,\n acq_func=acq_func,\n acq_optimizer=acq_optimizer,\n random_state=random_state,\n model_queue_size=model_queue_size,\n acq_func_kwargs=acq_func_kwargs,\n acq_optimizer_kwargs=acq_optimizer_kwargs)\n\n def ask(self, n_points=None, strategy=\"ldn\"):\n\n if len(self.Xi) == 0 and self._initial_samples is not None:\n return self._initial_samples\n elif len(self.Xi) == 0:\n raise ValueError(\"need initial points to do inference on\")\n\n if n_points is None:\n n_points = self.space.n_dims * 10\n\n supported_strategies = [\"ldn\"]\n\n # embed()\n\n if not (isinstance(n_points, int) and n_points > 0):\n raise ValueError(\"n_points should be int > 0, got \" + str(n_points))\n\n if strategy not in supported_strategies:\n raise ValueError(\"Expected parallel_strategy to be one of \" +\n str(supported_strategies) + \", \" + \"got %s\" % strategy)\n\n precomputed_dists = gowers.gowers_distance(self.Xi, self.Xi, self.space)\n\n # TODO: cache distance calculations so they don't repeat\n # self.cache_ = {len(self.Xi): precomputed_dists} # cache_ the result\n\n neighborhood = k_largest_diverse_neighborhood(\n precomputed_dists, self.yi, n_neighbors=self.space.n_dims * 2, k=1)[0]\n\n original_shape = np.shape(neighborhood)\n\n neighbors = np.array(self.Xi)[neighborhood.flatten()]\n\n # TODO: find cleaner way to do this\n neighbors[:, self._cat_inds] = 0\n neighbors = neighbors.astype(float)\n\n mins = neighbors.min(axis=0)\n maxes = neighbors.max(axis=0)\n\n new_dimensions = []\n for index, dimension in enumerate(self.space.dimensions):\n if isinstance(dimension, sp.Categorical):\n new_dimensions.append(dimension)\n continue\n new_dimensions.append(dimension.__class__(mins[index], maxes[index]))\n\n return self._initial_point_generator.generate(new_dimensions,\n n_points,\n random_state=self.rng)\n\n\ndef k_largest_diverse_neighborhood(X: npt.ArrayLike,\n y: npt.ArrayLike,\n n_neighbors: int = 1,\n k: int = 1,\n metric: Text = \"precomputed\",\n threshold: float = 0.01):\n # n_neighbors = np.shape(X)[0]/\n if metric == \"precomputed\":\n nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors,\n metric=\"precomputed\")\n nn.fit(X)\n neigh_dist, neigh_ind = nn.kneighbors()\n\n y = np.array(y)\n\n original_shape = np.shape(neigh_ind)\n\n neigh_labels = y[neigh_ind.flatten()].reshape(original_shape)\n\n diversity = (y[..., np.newaxis] ^\n neigh_labels.astype(int)).sum(axis=1) / (n_neighbors + 1)\n\n # average_distance = np.mean(neigh_dist, axis=1)\n 
max_distance = np.max(neigh_dist, axis=1)\n\n max_distance[max_distance < threshold] = 0\n\n score = diversity * max_distance\n\n top_indices = np.argsort(score)[-k:][::-1]\n\n return np.append(top_indices[..., np.newaxis], neigh_ind[top_indices], 1)\n else:\n # TODO: refactor with BallTree, waiting on\n # https://github.com/scikit-learn/scikit-learn/pull/16834\n raise NotImplementedError\n"
] | [
[
"numpy.max",
"numpy.append",
"numpy.shape",
"numpy.argsort",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Khoronus/researchM-lightweight-human-pose-estimation.pytorch | [
"8a66f5fd4c21205fc2e47451fc3e06daeeb77408"
] | [
"test-dataset.py"
] | [
"import argparse\nimport cv2\nimport os\n\nimport torch\nfrom torch.nn import DataParallel\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\nfrom datasets.coco import CocoTrainDataset\nfrom datasets.transformationsV3 import ConvertKeypoints, Scale, Rotate, CropPad, CropPad3, Flip\nfrom modules.get_parameters import get_parameters_conv, get_parameters_bn, get_parameters_conv_depthwise\nfrom models.with_mobilenet import PoseEstimationWithMobileNet\nfrom modules.loss import l2_loss\nfrom modules.load_state import load_state, load_from_mobilenet\nfrom val import evaluate\nimport numpy as np\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False) # To prevent freeze of DataLoader\n\n\ndef test_dataset(prepared_train_labels, train_images_folder, num_refinement_stages, base_lr, batch_size, batches_per_iter,\n num_workers, checkpoint_path, weights_only, from_mobilenet, checkpoints_folder, log_after,\n val_labels, val_images_folder, val_output_name, checkpoint_after, val_after):\n\n stride = 8\n sigma = 7\n path_thickness = 1\n dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n stride, sigma, path_thickness,\n transform=transforms.Compose([\n ConvertKeypoints(),\n Scale(),\n Rotate(pad=(128, 128, 128)),\n CropPad3(pad=(128, 128, 128)),\n Flip()]))\n #dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n # stride, sigma, path_thickness,\n # transform=transforms.Compose([\n # ConvertKeypoints(),\n # Scale(),\n # Rotate(pad=(128, 128, 128)),\n # CropPad(pad=(128, 128, 128),center_perterb_max=40, crop_x=1920, crop_y=1920),\n # Flip()]))\n #dataset = CocoTrainDataset(prepared_train_labels, train_images_folder,\n # stride, sigma, path_thickness,\n # transform=transforms.Compose([\n # ConvertKeypoints(),\n # CropPad2(pad=(128, 128, 128)),\n # Flip()]))\n\n for i in range (0, 30):\n batch_data = dataset.__getitem__(i)\n\n print('batch data: {}'.format(batch_data))\n\n images = batch_data['image']\n keypoint_masks = batch_data['keypoint_mask']\n paf_masks = batch_data['paf_mask']\n keypoint_maps = batch_data['keypoint_maps']\n paf_maps = batch_data['paf_maps']\n\n print('images shape: {}'.format(images.shape))\n\n images = np.moveaxis(images, [0, 2], [2, 0]) \n #print('image shape: {}'.format(image.shape))\n cv2.imwrite(\"imgage_tmp.jpg\", images * 255)\n #print('keypoint_masks: {}'.format(keypoint_masks.shape))\n #print('keypoint_maps: {}'.format(keypoint_maps.shape))\n #for j in range(0, 19):\n # mask = keypoint_masks[0,j,:,:].cpu().numpy()\n # cv2.imwrite('mask_tmp_'+str(j)+'.jpg', mask * 255) \n #for j in range(0, 19):\n # mask = keypoint_maps[0,j,:,:].cpu().numpy()\n # cv2.imwrite('keypoint_maps_tmp_'+str(j)+'.jpg', mask * 255) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prepared-train-labels', type=str, required=True,\n help='path to the file with prepared annotations')\n parser.add_argument('--train-images-folder', type=str, required=True, help='path to COCO train images folder')\n parser.add_argument('--num-refinement-stages', type=int, default=1, help='number of refinement stages')\n parser.add_argument('--base-lr', type=float, default=4e-5, help='initial learning rate')\n parser.add_argument('--batch-size', type=int, default=80, help='batch size')\n parser.add_argument('--batches-per-iter', type=int, default=1, help='number of batches to accumulate gradient from')\n parser.add_argument('--num-workers', type=int, default=8, help='number of workers')\n 
parser.add_argument('--checkpoint-path', type=str, required=True, help='path to the checkpoint to continue training from')\n parser.add_argument('--from-mobilenet', action='store_true',\n help='load weights from mobilenet feature extractor')\n parser.add_argument('--weights-only', action='store_true',\n help='just initialize layers with pre-trained weights and start training from the beginning')\n parser.add_argument('--experiment-name', type=str, default='default',\n help='experiment name to create folder for checkpoints')\n parser.add_argument('--log-after', type=int, default=100, help='number of iterations to print train loss')\n\n parser.add_argument('--val-labels', type=str, required=True, help='path to json with keypoints val labels')\n parser.add_argument('--val-images-folder', type=str, required=True, help='path to COCO val images folder')\n parser.add_argument('--val-output-name', type=str, default='detections.json',\n help='name of output json file with detected keypoints')\n parser.add_argument('--checkpoint-after', type=int, default=5000,\n help='number of iterations to save checkpoint')\n parser.add_argument('--val-after', type=int, default=5000,\n help='number of iterations to run validation')\n args = parser.parse_args()\n\n checkpoints_folder = '{}_checkpoints'.format(args.experiment_name)\n if not os.path.exists(checkpoints_folder):\n os.makedirs(checkpoints_folder)\n\n test_dataset(args.prepared_train_labels, args.train_images_folder, args.num_refinement_stages, args.base_lr, args.batch_size,\n args.batches_per_iter, args.num_workers, args.checkpoint_path, args.weights_only, args.from_mobilenet,\n checkpoints_folder, args.log_after, args.val_labels, args.val_images_folder, args.val_output_name,\n args.checkpoint_after, args.val_after)\n"
] | [
[
"numpy.moveaxis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sethupavan12/tensorflow | [
"505f62290840d952397340a98967ed1889caf5ab"
] | [
"tensorflow/python/keras/saving/saved_model/utils.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions shared between SavedModel saving/loading implementations.\"\"\"\n\nimport itertools\nimport threading\nimport types\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.utils import control_flow_util\nfrom tensorflow.python.keras.utils import tf_contextlib\nfrom tensorflow.python.keras.utils import tf_inspect\nfrom tensorflow.python.keras.utils.generic_utils import LazyLoader\nfrom tensorflow.python.util import tf_decorator\n\n\n# pylint:disable=g-inconsistent-quotes\ntraining_lib = LazyLoader(\n \"training_lib\", globals(),\n \"tensorflow.python.keras.engine.training\")\n# pylint:enable=g-inconsistent-quotes\n\n\ndef use_wrapped_call(layer, call_fn, default_training_value=None,\n return_method=False):\n \"\"\"Creates fn that adds the losses returned by call_fn & returns the outputs.\n\n Args:\n layer: A Keras layer object\n call_fn: tf.function that takes layer inputs (and possibly a training arg),\n and returns a tuple of (outputs, list of losses).\n default_training_value: Default value of the training kwarg. If `None`, the\n default is `K.learning_phase()`.\n return_method: Whether to return a method bound to the layer.\n\n Returns:\n function that calls call_fn and returns the outputs. Losses returned by\n call_fn are added to the layer losses.\n \"\"\"\n expects_training_arg = layer_uses_training_bool(layer)\n if hasattr(call_fn, 'original_layer_call'): # call_fn is a LayerCall object\n original_call = call_fn.original_layer_call\n # In Python 3, callable objects are not compatible with inspect.getargspec\n call_fn = call_fn.__call__\n else:\n original_call = call_fn\n fn, arg_spec = maybe_add_training_arg(\n original_call, call_fn, expects_training_arg, default_training_value)\n\n def return_outputs_and_add_losses(*args, **kwargs):\n \"\"\"Returns the outputs from the layer call function, and adds the losses.\"\"\"\n if return_method:\n args = args[1:]\n\n outputs, losses = fn(*args, **kwargs)\n layer.add_loss(losses, inputs=True)\n\n # TODO(kathywu): This is a temporary hack. When a network of layers is\n # revived from SavedModel, only the top-level layer will have losses. This\n # causes issues in eager mode because the child layers may have graph losses\n # (thus model.losses returns a mix of Eager and graph tensors). To fix this,\n # whenever eager losses are added to one layer, add eager losses to all\n # child layers. 
This causes `.losses` to only return eager losses.\n # pylint: disable=protected-access\n if context.executing_eagerly():\n for i in layer._flatten_layers():\n if i is not layer:\n i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n # pylint: enable=protected-access\n return outputs\n\n decorated = tf_decorator.make_decorator(\n target=call_fn,\n decorator_func=return_outputs_and_add_losses,\n decorator_argspec=arg_spec)\n\n if return_method:\n return types.MethodType(decorated, layer)\n else:\n return decorated\n\n\ndef layer_uses_training_bool(layer):\n \"\"\"Returns whether this layer or any of its children uses the training arg.\"\"\"\n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited = {layer}\n to_visit = list_all_layers(layer)\n while to_visit:\n layer = to_visit.pop()\n if layer in visited:\n continue\n if getattr(layer, '_expects_training_arg', True):\n return True\n visited.add(layer)\n to_visit.extend(list_all_layers(layer))\n return False\n\n\ndef list_all_layers(obj):\n if isinstance(obj, training_lib.Model):\n # Handle special case of Sequential, which doesn't return\n # the `Input` layer.\n return obj.layers\n else:\n return list(obj._flatten_layers(include_self=False, recursive=False)) # pylint: disable=protected-access\n\n\ndef list_all_layers_and_sublayers(obj):\n s = set([obj])\n s.update(itertools.chain.from_iterable(\n list_all_layers_and_sublayers(layer) for layer in list_all_layers(obj)))\n return s\n\n\ndef maybe_add_training_arg(\n original_call, wrapped_call, expects_training_arg, default_training_value):\n \"\"\"Decorate call and optionally adds training argument.\n\n If a layer expects a training argument, this function ensures that 'training'\n is present in the layer args or kwonly args, with the default training value.\n\n Args:\n original_call: Original call function.\n wrapped_call: Wrapped call function.\n expects_training_arg: Whether to include 'training' argument.\n default_training_value: Default value of the training kwarg to include in\n the arg spec. If `None`, the default is `K.learning_phase()`.\n\n Returns:\n Tuple of (\n function that calls `wrapped_call` and sets the training arg,\n Argspec of returned function or `None` if the argspec is unchanged)\n \"\"\"\n if not expects_training_arg:\n return wrapped_call, None\n def wrap_with_training_arg(*args, **kwargs):\n \"\"\"Wrap the `wrapped_call` function, and set training argument.\"\"\"\n training_arg_index = get_training_arg_index(original_call)\n training = get_training_arg(training_arg_index, args, kwargs)\n if training is None:\n training = default_training_value or K.learning_phase()\n\n args = list(args)\n kwargs = kwargs.copy()\n\n def replace_training_and_call(training):\n set_training_arg(training, training_arg_index, args, kwargs)\n return wrapped_call(*args, **kwargs)\n\n return control_flow_util.smart_cond(\n training, lambda: replace_training_and_call(True),\n lambda: replace_training_and_call(False))\n\n # Create arg spec for decorated function. 
If 'training' is not defined in the\n # args of the original arg spec, then add it to kwonlyargs.\n arg_spec = tf_inspect.getfullargspec(original_call)\n defaults = list(arg_spec.defaults) if arg_spec.defaults is not None else []\n\n kwonlyargs = arg_spec.kwonlyargs\n kwonlydefaults = arg_spec.kwonlydefaults or {}\n # Add training arg if it does not exist, or set the default training value.\n if 'training' not in arg_spec.args:\n kwonlyargs.append('training')\n kwonlydefaults['training'] = default_training_value\n else:\n index = arg_spec.args.index('training')\n training_default_index = len(arg_spec.args) - index\n if (arg_spec.defaults and\n len(arg_spec.defaults) >= training_default_index and\n defaults[-training_default_index] is None):\n defaults[-training_default_index] = default_training_value\n\n decorator_argspec = tf_inspect.FullArgSpec(\n args=arg_spec.args,\n varargs=arg_spec.varargs,\n varkw=arg_spec.varkw,\n defaults=defaults,\n kwonlyargs=kwonlyargs,\n kwonlydefaults=kwonlydefaults,\n annotations=arg_spec.annotations)\n return wrap_with_training_arg, decorator_argspec\n\n\ndef get_training_arg_index(call_fn):\n \"\"\"Returns the index of 'training' in the layer call function arguments.\n\n Args:\n call_fn: Call function.\n\n Returns:\n - n: index of 'training' in the call function arguments.\n - -1: if 'training' is not found in the arguments, but layer.call accepts\n variable keyword arguments\n - None: if layer doesn't expect a training argument.\n \"\"\"\n argspec = tf_inspect.getfullargspec(call_fn)\n if argspec.varargs:\n # When there are variable args, training must be a keyword arg.\n if 'training' in argspec.kwonlyargs or argspec.varkw:\n return -1\n return None\n else:\n # Try to find 'training' in the list of args or kwargs.\n arg_list = argspec.args\n if tf_inspect.ismethod(call_fn):\n arg_list = arg_list[1:]\n\n if 'training' in arg_list:\n return arg_list.index('training')\n elif 'training' in argspec.kwonlyargs or argspec.varkw:\n return -1\n return None\n\n\ndef set_training_arg(training, index, args, kwargs):\n if index is None or index < 0 or len(args) <= index: # index is invalid\n kwargs['training'] = training\n else:\n args[index] = training\n return args, kwargs\n\n\ndef get_training_arg(index, args, kwargs):\n if index is None or index < 0 or len(args) <= index: # index is invalid\n return kwargs.get('training', None)\n else:\n return args[index]\n\n\ndef remove_training_arg(index, args, kwargs):\n if index is None or index < 0 or len(args) <= index: # index is invalid\n kwargs.pop('training', None)\n else:\n args.pop(index)\n\n\nclass SaveOptionsContext(threading.local):\n\n def __init__(self):\n super(SaveOptionsContext, self).__init__()\n self.save_traces = True\n\n\n_save_options_context = SaveOptionsContext()\n\n\n@tf_contextlib.contextmanager\ndef keras_option_scope(save_traces):\n previous_value = _save_options_context.save_traces\n try:\n _save_options_context.save_traces = save_traces\n yield\n finally:\n _save_options_context.save_traces = previous_value\n\n\ndef should_save_traces():\n \"\"\"Whether to trace layer functions-can be disabled in the save_traces arg.\"\"\"\n return _save_options_context.save_traces\n\n"
] | [
[
"tensorflow.python.keras.utils.tf_inspect.ismethod",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.utils.tf_inspect.FullArgSpec",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.utils.tf_inspect.getfullargspec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
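The Keras `maybe_add_training_arg` utility in the record above wraps a layer's `call` so that a default `training` value is injected when the caller omits it. Below is a stripped-down, framework-free sketch of that wrapping idea — the names (`add_default_training`, `wrapped`) are local to this sketch and are not Keras internals:

```python
import functools
import inspect

def add_default_training(call_fn, default_training=False):
    """Wrap call_fn so a 'training' value is supplied when the caller omits it."""
    params = list(inspect.signature(call_fn).parameters)
    idx = params.index("training") if "training" in params else -1

    @functools.wraps(call_fn)
    def wrapped(*args, **kwargs):
        # Inject only if 'training' exists and wasn't passed positionally or by name.
        if idx >= 0 and len(args) <= idx and "training" not in kwargs:
            kwargs["training"] = default_training
        return call_fn(*args, **kwargs)
    return wrapped

def call(x, training=None):
    return ("train" if training else "eval", x)

wrapped_call = add_default_training(call, default_training=True)
print(wrapped_call(1))                  # ('train', 1) -- default injected
print(wrapped_call(2, training=False))  # ('eval', 2)  -- caller's value wins
```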
TomBourgeade/allennlp | [
"357691546bf65fc464bd0b564749bfa97e6db9aa"
] | [
"allennlp/modules/token_embedders/pretrained_transformer_embedder.py"
] | [
"import logging\nimport math\nfrom typing import Optional, Tuple, Dict, Any\n\nfrom overrides import overrides\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers import XLNetConfig\n\nfrom allennlp.data.tokenizers import PretrainedTransformerTokenizer\nfrom allennlp.modules.scalar_mix import ScalarMix\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\nfrom allennlp.nn.util import batched_index_select\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"pretrained_transformer\")\nclass PretrainedTransformerEmbedder(TokenEmbedder):\n \"\"\"\n Uses a pretrained model from `transformers` as a `TokenEmbedder`.\n\n Registered as a `TokenEmbedder` with name \"pretrained_transformer\".\n\n # Parameters\n\n model_name : `str`\n The name of the `transformers` model to use. Should be the same as the corresponding\n `PretrainedTransformerIndexer`.\n max_length : `int`, optional (default = `None`)\n If positive, folds input token IDs into multiple segments of this length, pass them\n through the transformer model independently, and concatenate the final representations.\n Should be set to the same value as the `max_length` option on the\n `PretrainedTransformerIndexer`.\n sub_module: `str`, optional (default = `None`)\n The name of a submodule of the transformer to be used as the embedder. Some transformers naturally act\n as embedders such as BERT. However, other models consist of encoder and decoder, in which case we just\n want to use the encoder.\n train_parameters: `bool`, optional (default = `True`)\n If this is `True`, the transformer weights get updated during training. If this is `False`, the\n transformer weights are not updated during training.\n eval_mode: `bool`, optional (default = `False`)\n If this is `True`, the model is always set to evaluation mode (e.g., the dropout is disabled and the\n batch normalization layer statistics are not updated). If this is `False`, such dropout and batch\n normalization layers are only set to evaluation mode when when the model is evaluating on development\n or test data.\n last_layer_only: `bool`, optional (default = `True`)\n When `True` (the default), only the final layer of the pretrained transformer is taken\n for the embeddings. 
But if set to `False`, a scalar mix of all of the layers\n is used.\n gradient_checkpointing: `bool`, optional (default = `None`)\n Enable or disable gradient checkpointing.\n tokenizer_kwargs: `Dict[str, Any]`, optional (default = `None`)\n Dictionary with\n [additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/tokenization_utils.py#L691)\n for `AutoTokenizer.from_pretrained`.\n transformer_kwargs: `Dict[str, Any]`, optional (default = `None`)\n Dictionary with\n [additional arguments](https://github.com/huggingface/transformers/blob/155c782a2ccd103cf63ad48a2becd7c76a7d2115/transformers/modeling_utils.py#L253)\n for `AutoModel.from_pretrained`.\n \"\"\" # noqa: E501\n\n authorized_missing_keys = [r\"position_ids$\"]\n\n def __init__(\n self,\n model_name: str,\n *,\n max_length: int = None,\n sub_module: str = None,\n train_parameters: bool = True,\n eval_mode: bool = False,\n last_layer_only: bool = True,\n override_weights_file: Optional[str] = None,\n override_weights_strip_prefix: Optional[str] = None,\n gradient_checkpointing: Optional[bool] = None,\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n transformer_kwargs: Optional[Dict[str, Any]] = None,\n ) -> None:\n super().__init__()\n from allennlp.common import cached_transformers\n\n self.transformer_model = cached_transformers.get(\n model_name,\n True,\n override_weights_file=override_weights_file,\n override_weights_strip_prefix=override_weights_strip_prefix,\n **(transformer_kwargs or {}),\n )\n\n if gradient_checkpointing is not None:\n self.transformer_model.config.update({\"gradient_checkpointing\": gradient_checkpointing})\n\n self.config = self.transformer_model.config\n if sub_module:\n assert hasattr(self.transformer_model, sub_module)\n self.transformer_model = getattr(self.transformer_model, sub_module)\n self._max_length = max_length\n\n # I'm not sure if this works for all models; open an issue on github if you find a case\n # where it doesn't work.\n self.output_dim = self.config.hidden_size\n\n self._scalar_mix: Optional[ScalarMix] = None\n if not last_layer_only:\n self._scalar_mix = ScalarMix(self.config.num_hidden_layers)\n self.config.output_hidden_states = True\n\n tokenizer = PretrainedTransformerTokenizer(\n model_name,\n tokenizer_kwargs=tokenizer_kwargs,\n )\n\n try:\n if self.transformer_model.get_input_embeddings().num_embeddings != len(\n tokenizer.tokenizer\n ):\n self.transformer_model.resize_token_embeddings(len(tokenizer.tokenizer))\n except NotImplementedError:\n # Can't resize for transformers models that don't implement base_model.get_input_embeddings()\n logger.warning(\n \"Could not resize the token embedding matrix of the transformer model. 
\"\n \"This model does not support resizing.\"\n )\n\n self._num_added_start_tokens = len(tokenizer.single_sequence_start_tokens)\n self._num_added_end_tokens = len(tokenizer.single_sequence_end_tokens)\n self._num_added_tokens = self._num_added_start_tokens + self._num_added_end_tokens\n\n self.train_parameters = train_parameters\n if not train_parameters:\n for param in self.transformer_model.parameters():\n param.requires_grad = False\n\n self.eval_mode = eval_mode\n if eval_mode:\n self.transformer_model.eval()\n\n @overrides\n def train(self, mode: bool = True):\n self.training = mode\n for name, module in self.named_children():\n if self.eval_mode and name == \"transformer_model\":\n module.eval()\n else:\n module.train(mode)\n return self\n\n @overrides\n def get_output_dim(self):\n return self.output_dim\n\n def _number_of_token_type_embeddings(self):\n if isinstance(self.config, XLNetConfig):\n return 3 # XLNet has 3 type ids\n elif hasattr(self.config, \"type_vocab_size\"):\n return self.config.type_vocab_size\n else:\n return 0\n\n @overrides\n def forward(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n segment_concat_mask: Optional[torch.BoolTensor] = None,\n ) -> torch.Tensor: # type: ignore\n \"\"\"\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n middle, e.g. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: [batch_size, num_wordpieces].\n type_ids: `Optional[torch.LongTensor]`\n Shape: `[batch_size, num_wordpieces if max_length is None else num_segment_concat_wordpieces]`.\n segment_concat_mask: `Optional[torch.BoolTensor]`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n\n # Returns\n\n `torch.Tensor`\n Shape: `[batch_size, num_wordpieces, embedding_size]`.\n\n \"\"\"\n # Some of the huggingface transformers don't support type ids at all and crash when you supply\n # them. 
For others, you can supply a tensor of zeros, and if you don't, they act as if you did.\n # There is no practical difference to the caller, so here we pretend that one case is the same\n # as another case.\n if type_ids is not None:\n max_type_id = type_ids.max()\n if max_type_id == 0:\n type_ids = None\n else:\n if max_type_id >= self._number_of_token_type_embeddings():\n raise ValueError(\"Found type ids too large for the chosen transformer model.\")\n assert token_ids.shape == type_ids.shape\n\n fold_long_sequences = self._max_length is not None and token_ids.size(1) > self._max_length\n if fold_long_sequences:\n batch_size, num_segment_concat_wordpieces = token_ids.size()\n token_ids, segment_concat_mask, type_ids = self._fold_long_sequences(\n token_ids, segment_concat_mask, type_ids\n )\n\n transformer_mask = segment_concat_mask if self._max_length is not None else mask\n assert transformer_mask is not None\n # Shape: [batch_size, num_wordpieces, embedding_size],\n # or if self._max_length is not None:\n # [batch_size * num_segments, self._max_length, embedding_size]\n\n # We call this with kwargs because some of the huggingface models don't have the\n # token_type_ids parameter and fail even when it's given as None.\n # Also, as of transformers v2.5.1, they are taking FloatTensor masks.\n parameters = {\"input_ids\": token_ids, \"attention_mask\": transformer_mask.float()}\n if type_ids is not None:\n parameters[\"token_type_ids\"] = type_ids\n\n transformer_output = self.transformer_model(**parameters)\n if self._scalar_mix is not None:\n # The hidden states will also include the embedding layer, which we don't\n # include in the scalar mix. Hence the `[1:]` slicing.\n hidden_states = transformer_output.hidden_states[1:]\n embeddings = self._scalar_mix(hidden_states)\n else:\n embeddings = transformer_output.last_hidden_state\n\n if fold_long_sequences:\n embeddings = self._unfold_long_sequences(\n embeddings, segment_concat_mask, batch_size, num_segment_concat_wordpieces\n )\n\n return embeddings\n\n def _fold_long_sequences(\n self,\n token_ids: torch.LongTensor,\n mask: torch.BoolTensor,\n type_ids: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.LongTensor, torch.LongTensor, Optional[torch.LongTensor]]:\n \"\"\"\n We fold 1D sequences (for each element in batch), returned by `PretrainedTransformerIndexer`\n that are in reality multiple segments concatenated together, to 2D tensors, e.g.\n\n [ [CLS] A B C [SEP] [CLS] D E [SEP] ]\n -> [ [ [CLS] A B C [SEP] ], [ [CLS] D E [SEP] [PAD] ] ]\n The [PAD] positions can be found in the returned `mask`.\n\n # Parameters\n\n token_ids: `torch.LongTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n num_segment_concat_wordpieces is num_wordpieces plus special tokens inserted in the\n middle, i.e. the length of: \"[CLS] A B C [SEP] [CLS] D E F [SEP]\" (see indexer logic).\n mask: `torch.BoolTensor`\n Shape: `[batch_size, num_segment_concat_wordpieces]`.\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n type_ids: `Optional[torch.LongTensor]`\n Shape: [batch_size, num_segment_concat_wordpieces].\n\n # Returns:\n\n token_ids: `torch.LongTensor`\n Shape: [batch_size * num_segments, self._max_length].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n \"\"\"\n num_segment_concat_wordpieces = token_ids.size(1)\n num_segments = math.ceil(num_segment_concat_wordpieces / self._max_length) # type: ignore\n padded_length = num_segments * self._max_length # type: ignore\n length_to_pad = padded_length - num_segment_concat_wordpieces\n\n def fold(tensor): # Shape: [batch_size, num_segment_concat_wordpieces]\n # Shape: [batch_size, num_segments * self._max_length]\n tensor = F.pad(tensor, [0, length_to_pad], value=0)\n # Shape: [batch_size * num_segments, self._max_length]\n return tensor.reshape(-1, self._max_length)\n\n return fold(token_ids), fold(mask), fold(type_ids) if type_ids is not None else None\n\n def _unfold_long_sequences(\n self,\n embeddings: torch.FloatTensor,\n mask: torch.BoolTensor,\n batch_size: int,\n num_segment_concat_wordpieces: int,\n ) -> torch.FloatTensor:\n \"\"\"\n We take 2D segments of a long sequence and flatten them out to get the whole sequence\n representation while remove unnecessary special tokens.\n\n [ [ [CLS]_emb A_emb B_emb C_emb [SEP]_emb ], [ [CLS]_emb D_emb E_emb [SEP]_emb [PAD]_emb ] ]\n -> [ [CLS]_emb A_emb B_emb C_emb D_emb E_emb [SEP]_emb ]\n\n We truncate the start and end tokens for all segments, recombine the segments,\n and manually add back the start and end tokens.\n\n # Parameters\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size * num_segments, self._max_length, embedding_size].\n mask: `torch.BoolTensor`\n Shape: [batch_size * num_segments, self._max_length].\n The mask for the concatenated segments of wordpieces. 
The same as `segment_concat_mask`\n in `forward()`.\n batch_size: `int`\n num_segment_concat_wordpieces: `int`\n The length of the original \"[ [CLS] A B C [SEP] [CLS] D E F [SEP] ]\", i.e.\n the original `token_ids.size(1)`.\n\n # Returns:\n\n embeddings: `torch.FloatTensor`\n Shape: [batch_size, self._num_wordpieces, embedding_size].\n \"\"\"\n\n def lengths_to_mask(lengths, max_len, device):\n return torch.arange(max_len, device=device).expand(\n lengths.size(0), max_len\n ) < lengths.unsqueeze(1)\n\n device = embeddings.device\n num_segments = int(embeddings.size(0) / batch_size)\n embedding_size = embeddings.size(2)\n\n # We want to remove all segment-level special tokens but maintain sequence-level ones\n num_wordpieces = num_segment_concat_wordpieces - (num_segments - 1) * self._num_added_tokens\n\n embeddings = embeddings.reshape(\n batch_size, num_segments * self._max_length, embedding_size # type: ignore\n )\n mask = mask.reshape(batch_size, num_segments * self._max_length) # type: ignore\n # We assume that all 1s in the mask precede all 0s, and add an assert for that.\n # Open an issue on GitHub if this breaks for you.\n # Shape: (batch_size,)\n seq_lengths = mask.sum(-1)\n if not (lengths_to_mask(seq_lengths, mask.size(1), device) == mask).all():\n raise ValueError(\n \"Long sequence splitting only supports masks with all 1s preceding all 0s.\"\n )\n # Shape: (batch_size, self._num_added_end_tokens); this is a broadcast op\n end_token_indices = (\n seq_lengths.unsqueeze(-1) - torch.arange(self._num_added_end_tokens, device=device) - 1\n )\n\n # Shape: (batch_size, self._num_added_start_tokens, embedding_size)\n start_token_embeddings = embeddings[:, : self._num_added_start_tokens, :]\n # Shape: (batch_size, self._num_added_end_tokens, embedding_size)\n end_token_embeddings = batched_index_select(embeddings, end_token_indices)\n\n embeddings = embeddings.reshape(batch_size, num_segments, self._max_length, embedding_size)\n embeddings = embeddings[\n :, :, self._num_added_start_tokens : embeddings.size(2) - self._num_added_end_tokens, :\n ] # truncate segment-level start/end tokens\n embeddings = embeddings.reshape(batch_size, -1, embedding_size) # flatten\n\n # Now try to put end token embeddings back which is a little tricky.\n\n # The number of segment each sequence spans, excluding padding. Mimicking ceiling operation.\n # Shape: (batch_size,)\n num_effective_segments = (seq_lengths + self._max_length - 1) // self._max_length\n # The number of indices that end tokens should shift back.\n num_removed_non_end_tokens = (\n num_effective_segments * self._num_added_tokens - self._num_added_end_tokens\n )\n # Shape: (batch_size, self._num_added_end_tokens)\n end_token_indices -= num_removed_non_end_tokens.unsqueeze(-1)\n assert (end_token_indices >= self._num_added_start_tokens).all()\n # Add space for end embeddings\n embeddings = torch.cat([embeddings, torch.zeros_like(end_token_embeddings)], 1)\n # Add end token embeddings back\n embeddings.scatter_(\n 1, end_token_indices.unsqueeze(-1).expand_as(end_token_embeddings), end_token_embeddings\n )\n\n # Now put back start tokens. We can do this before putting back end tokens, but then\n # we need to change `num_removed_non_end_tokens` a little.\n embeddings = torch.cat([start_token_embeddings, embeddings], 1)\n\n # Truncate to original length\n embeddings = embeddings[:, :num_wordpieces, :]\n return embeddings\n"
] | [
[
"torch.zeros_like",
"torch.arange",
"torch.nn.functional.pad",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
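A minimal, self-contained sketch of the folding arithmetic used by `PretrainedTransformerEmbedder._fold_long_sequences` in the record above: a `[batch_size, num_segment_concat_wordpieces]` tensor is right-padded to a multiple of `max_length` and reshaped to `[batch_size * num_segments, max_length]`. The shapes are illustrative, not taken from the dataset:

```python
import math

import torch
import torch.nn.functional as F

batch_size, concat_len, max_length = 2, 11, 4  # illustrative shapes
token_ids = torch.arange(batch_size * concat_len).reshape(batch_size, concat_len)

num_segments = math.ceil(concat_len / max_length)        # 3 segments per sequence
length_to_pad = num_segments * max_length - concat_len   # 1 padding position
folded = F.pad(token_ids, [0, length_to_pad], value=0).reshape(-1, max_length)

print(folded.shape)  # torch.Size([6, 4]) == [batch_size * num_segments, max_length]
```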
alan-turing-institute/WimbledonPlanner | [
"ff73f2a52425d7855ebf224f6acc59fa99ff664b"
] | [
"wimbledon/wimbledon.py"
] | [
"import pandas as pd\nimport holidays\nfrom copy import deepcopy\nimport numpy as np\nimport warnings\n\nimport wimbledon.config\nimport wimbledon.harvest.db_interface\nfrom wimbledon.sql import query_db\n\n\ndef get_business_days(start_date, end_date):\n \"\"\"Get a daily time series between start_date and end_date\n excluding weekends and public holidays.\"\"\"\n\n date_range = pd.date_range(\n start=start_date, end=end_date, freq=pd.tseries.offsets.BDay()\n )\n\n # remove public holidays\n pub_hols = holidays.England()\n date_range = pd.to_datetime([date for date in date_range if date not in pub_hols])\n\n return date_range\n\n\ndef select_date_range(df, start_date, end_date, drop_zero_cols=True):\n \"\"\"Extract a range of dates from a dataframe with a datetime index,\n then remove any columns which are left empty (full of zeros).\"\"\"\n\n df_slice = df.copy()\n\n if start_date is not None:\n df_slice = df_slice[df_slice.index >= start_date]\n if end_date is not None:\n df_slice = df_slice[df_slice.index <= end_date]\n\n if drop_zero_cols:\n nonzero_cols = df_slice.columns[~(df_slice == 0).all()]\n df_slice = df_slice[nonzero_cols]\n\n return df_slice\n\n\nclass Wimbledon:\n def __init__(\n self,\n conn=None,\n update_db=False,\n with_tracked_time=True,\n work_hrs_per_day=None,\n proj_hrs_per_day=None,\n ):\n \"\"\"Load and group Wimbledon data.\n\n Keyword Arguments:\n update_db {bool} -- update the database before loading data (default: {False})\n work_hrs_per_day {numeric} -- hours in normal working day (default: 8)\n proj_hrs_per_day {numeric} -- nominal hours spent on projects per day (default: 6.4)\n conn {SQLAlchemy connection} -- connection to database (default: get from wimbledon config)\n with_tracked_time {bool} -- whether to load and process timesheet data (default: {True})\n \"\"\"\n if update_db:\n wimbledon.harvest.db_interface.update_db(\n conn=conn, with_tracked_time=with_tracked_time\n )\n\n data = query_db.get_data(conn=conn, with_tracked_time=with_tracked_time)\n self.people = data[\"people\"]\n self.people[\"capacity\"].fillna(0, inplace=True)\n self.projects = data[\"projects\"]\n self.assignments = data[\"assignments\"]\n self.clients = data[\"clients\"]\n self.associations = data[\"associations\"]\n\n start_date = self.assignments[\"start_date\"].min()\n end_date = self.assignments[\"end_date\"].max()\n\n if with_tracked_time:\n self.tasks = data[\"tasks\"]\n self.time_entries = data[\"time_entries\"]\n\n start_date = min([start_date, self.time_entries[\"date\"].min()])\n end_date = max([end_date, self.time_entries[\"date\"].max()])\n # people may track time on non-working days, so create a separate\n # time series for time tracking\n self.date_range_alldays = pd.date_range(\n start=start_date, end=end_date, freq=\"D\"\n )\n\n # Find the earliest and latest date in the data, create a range\n # of weekdays between these dates (so people will only have allocations\n # to projects on working days)\n # NB: this should take into account bank holidays, but not things like\n # British Library shutdown over Christmas.\n self.date_range_workdays = get_business_days(start_date, end_date)\n\n # 1 FTE hours per day\n self.work_hrs_per_day = 8 if work_hrs_per_day is None else work_hrs_per_day\n # hours per day nominally for projects\n self.proj_hrs_per_day = 6.4 if proj_hrs_per_day is None else proj_hrs_per_day\n # convert assignments in seconds per day to fractions of 1 FTE\n # (defined by self.work_hrs_per_day)\n self.assignments[\"allocation\"] = 
self.assignments[\"allocation\"] / (\n self.work_hrs_per_day * 60 * 60\n )\n\n # convert baseline capacity in seconds per week to fraction of 1 FTE\n self.people.capacity = self.people.capacity / (\n 5 * self.work_hrs_per_day * 60 * 60\n )\n\n # people_allocations: dict with key person_id, contains df of (date, project_id)\n # with allocation people_totals: df of (date, person_id) with total allocations\n self.people_allocations, self.people_totals = self._get_allocations(\"person\")\n\n # people required, unconfirmed, deferred allocations\n self.peoplereq_allocations = self.get_person_allocations(\"PEOPLE REQUIRED\")\n self.unconfirmed_allocations = self.get_person_allocations(\"UNCONFIRMED\")\n self.deferred_allocations = self.get_person_allocations(\"DEFERRED\")\n\n # calculate team capacity: capacity in people table minus any allocations to\n # unavailable project\n self.people_capacities = pd.DataFrame(\n index=self.date_range_workdays, columns=self.people.index\n )\n unavail_client = self.get_client_id(\"UNAVAILABLE\")\n unavail_projects = self.get_client_projects(unavail_client)\n for person_id in self.people.index:\n self.people_capacities[person_id] = self.people.capacity[person_id]\n\n for proj_id in self.people_allocations[person_id].columns:\n if proj_id in unavail_projects:\n self.people_capacities[person_id] = (\n self.people_capacities[person_id]\n - self.people_allocations[person_id][proj_id]\n )\n # check for incorrect allocations leading to negative capacity\n negative = self.people_capacities[person_id] < 0\n if negative.any():\n warnings.warn(\n f\"Person ID {person_id} has negative capacities. \"\n \"Reset to 0.\"\n )\n self.people_capacities[person_id][negative] = 0\n\n self.team_capacity = self.people_capacities.sum(axis=1)\n self.people_free_capacity = self.people_capacities - self.people_totals\n\n # project_allocations: dict with key project_id, contains df of\n # (date, person_id) with allocation project_confirmed: df of (date, project_id)\n # with total allocations across PEOPLE ONLY\n self.project_allocations, self.project_confirmed = self._get_allocations(\n \"project\"\n )\n\n # project_unconfirmed: df of (date, project_id) with total allocation to\n # unconfirmed placeholders\n self.project_unconfirmed = self._get_project_unconfirmed()\n\n # project_deferred: df of (date, project_id) with total allocation to deferred\n # placeholders\n self.project_deferred = self._get_project_deferred()\n\n # project_peoplereq: people_required allocations to each project\n self.project_peoplereq = self._get_project_required()\n\n # project_notfunded allocations to each project\n self.project_notfunded = self._get_project_notfunded()\n\n # project_confirmed: should not include unconfirmed or deferred totals\n self.project_confirmed = (\n self.project_confirmed\n - self.project_unconfirmed\n - self.project_deferred\n - self.project_notfunded\n )\n\n self.project_allocated = self.project_confirmed - self.project_peoplereq\n\n # Time Tracking\n if with_tracked_time:\n self.tracked_project_tasks = self._get_tracking(\"project\", \"task\")\n self.tracked_project_people = self._get_tracking(\"project\", \"person\")\n self.tracked_person_projects = self._get_tracking(\"person\", \"project\")\n self.tracked_person_tasks = self._get_tracking(\"person\", \"task\")\n\n self.tracked_project_totals = self._get_tracking(\"project\", \"TOTAL\")\n self.tracked_person_totals = self._get_tracking(\"person\", \"TOTAL\")\n self.tracked_task_totals = self._get_tracking(\"task\", \"TOTAL\")\n\n # 
calculate per-client totals for each person\n self.tracked_person_clients = self._client_from_project_tracking(\n self.tracked_person_projects\n )\n\n # calculate overall per-client totals\n self.tracked_client_totals = self._client_from_project_tracking(\n self.tracked_project_totals\n )\n\n def get_person_name(self, person_id):\n \"\"\"Get the name of someone from their person_id\"\"\"\n return self.people.loc[person_id, \"name\"]\n\n def get_person_id(self, name):\n \"\"\"Get the person_id of someone from their first_name and last_name.\"\"\"\n person_id = self.people.loc[(self.people[\"name\"] == name)]\n\n if len(person_id) != 1:\n warnings.warn(\n \"Could not find unique person with name \"\n + name\n + \". This person may have unlinked Harvest & Forecast accounts. \"\n \" Returning first available index. This may cause errors elsewhere!\"\n )\n\n return person_id.index[0]\n\n def get_project_name(self, project_id):\n \"\"\"Get the name of a project from its project_id\"\"\"\n return self.projects.loc[project_id, \"name\"]\n\n def get_project_id(self, project_name):\n \"\"\"Get the id of a project from its name\"\"\"\n return self.projects.index[self.projects.name == project_name][0]\n\n def get_client_name(self, client_id):\n \"\"\"Get the name of a project from its project_id\"\"\"\n return self.clients.loc[client_id, \"name\"]\n\n def get_client_id(self, client_name):\n return self.clients.index[self.clients.name == client_name][0]\n\n def get_client_projects(self, client_id):\n return self.projects.index[self.projects.client == client_id]\n\n def get_task_name(self, task_id):\n \"\"\"Get the name of a task from its id\"\"\"\n return self.tasks.loc[task_id, \"name\"]\n\n def get_task_id(self, task_name):\n \"\"\"Get the id of a task from its name\"\"\"\n return self.tasks.index[self.tasks.name == task_name][0]\n\n def get_association_name(self, association_id):\n \"\"\"get the association name from the association id\"\"\"\n return self.associations.loc[association_id, \"name\"]\n\n def get_association_id(self, association_name):\n \"\"\"get the association id from the association name\"\"\"\n return self.associations.index[self.associations.name == association_name][0]\n\n def get_name(self, id_value, id_type):\n \"\"\"Get the name of an id based on the type of id it is. id_type can be\n 'person', 'project', 'client', or 'task'\"\"\"\n if id_type == \"person\":\n return self.get_person_name(id_value)\n elif id_type == \"project\":\n return self.get_project_name(id_value)\n elif id_type == \"client\":\n return self.get_client_name(id_value)\n elif id_type == \"task\":\n return self.get_task_name(id_value)\n else:\n raise ValueError(\"id_type must be person, project, client or task\")\n\n def get_person_allocations(self, name):\n idx = self.get_person_id(name)\n return self.people_allocations[idx]\n\n def get_id(self, name, id_type):\n \"\"\"Get the name of an id based on the type of id it is. 
id_type can be\n 'person', 'project', 'client', or 'task'.\"\"\"\n if id_type == \"person\":\n return self.get_person_id(name)\n\n elif id_type == \"project\":\n return self.get_project_id(name)\n\n elif id_type == \"client\":\n return self.get_client_id(name)\n\n elif id_type == \"task\":\n return self.get_task_id(name)\n\n else:\n raise ValueError(\"id_type must be person, project, client or task\")\n\n def get_active_people(self, start_date, end_date, names=False, partners=True):\n \"\"\"People with capacity (any capacity, not just free capacity) between\n start_date and end_date\n \"\"\"\n active = select_date_range(self.people_capacities, start_date, end_date)\n if not partners:\n # don't include university partners\n active = active.loc[\n :,\n self.people.loc[active.columns, \"association\"]\n != self.get_association_id(\"University Partner\"),\n ]\n if names:\n return self.people.loc[active.columns, \"name\"]\n else:\n return active.columns\n\n def get_active_projects(self, start_date, end_date, names=False):\n \"\"\"Projects with requirements between start_date and end_date\"\"\"\n proj = select_date_range(self.project_confirmed, start_date, end_date)\n if names:\n return self.projects.loc[proj.columns, \"name\"]\n else:\n return proj.columns\n\n def whiteboard(self, key_type, start_date, end_date, freq):\n \"\"\"Create the raw, unstyled, whiteboard visualisation.\n\n Returns a dataframe whose rows are key_type ids (project or person), whose\n columns are dates, and whose cell values are a person or project plus their\n time allocation, sorted by time allocation.\n \"\"\"\n # TODO move this function somewhere else?\n if key_type == \"project\":\n # copy to prevent overwriting original\n data_dict = deepcopy(self.project_allocations)\n\n elif key_type == \"person\":\n data_dict = deepcopy(self.people_allocations)\n\n else:\n raise ValueError(\"key_type must be person or project\")\n\n unavail_client = self.get_client_id(\"UNAVAILABLE\")\n unavail_project_ids = self.get_client_projects(unavail_client)\n unavail_project_names = [\n self.get_project_name(idx) for idx in unavail_project_ids\n ]\n\n sheet = {}\n # set of unique project names used for cell colouring later\n names = set()\n\n # for each key (a project or a person)\n for key in data_dict.keys():\n # get this key's allocations\n df = data_dict[key]\n\n # replace ids with names. 
for project id: include people required.\n if key_type == \"project\":\n if key in unavail_project_ids:\n # don't display allocations to unavailable project\n continue\n\n df.columns = [\n self.get_name(person_id, \"person\") for person_id in df.columns\n ]\n\n df.columns.name = self.get_name(key, \"project\")\n\n elif key_type == \"person\":\n df.columns = [\n self.get_name(project_id, \"project\") for project_id in df.columns\n ]\n\n df.columns.name = self.get_name(key, \"person\")\n\n else:\n raise ValueError(\"key_type must be person or project\")\n\n if key_type == \"person\":\n # add flags for people with free capacity or over capacity\n # unallocated\n df[\"UNALLOCATED\"] = self.people_free_capacity[key]\n # set overallocated cases to 0\n df.loc[df[\"UNALLOCATED\"] < 0, \"UNALLOCATED\"] = 0\n # over allocated\n df[\"OVER CAPACITY\"] = self.people_free_capacity[key]\n # set under allocated cases to 0\n df.loc[df[\"OVER CAPACITY\"] > 0, \"OVER CAPACITY\"] = 0\n # make remaining values positive\n df[\"OVER CAPACITY\"] = df[\"OVER CAPACITY\"].abs()\n\n # extract the date range of interest\n df = select_date_range(df, start_date, end_date)\n\n if key_type == \"person\" and df.columns.isin(unavail_project_names).all():\n # don't display people who are only assigned as unavailable\n continue\n\n # check there are project allocations to display\n if df.shape[0] > 0 and df.shape[1] > 0:\n\n # update the set of names\n [names.add(col) for col in df.columns]\n\n # resample the data to the given date frequency\n if freq != \"D\":\n df = df.resample(freq).mean()\n\n # sort columns by magnitude of earliest assignment\n df = df.sort_values(\n by=[idx for idx in df.index], axis=1, ascending=False\n )\n\n # max number of items assigned to this key at a time\n n_columns = (df > 0).sum(axis=1).max()\n\n # initialise data frame to store ranked time assignments\n key_sheet = pd.DataFrame(\n \"\", index=df.index, columns=range(1, n_columns + 1)\n )\n\n fill_idx = None\n\n for name_idx, name in enumerate(df.columns):\n # flags dates where this name has a non-zero allocation\n nonzero_allocs = df.iloc[:, name_idx] > 0\n\n # choose where to place new allocations in key_sheet\n for key_col in key_sheet.columns:\n # flags columns in key_sheet where the new allocations in df[name] overlap\n # with previous allocations added to key_sheet\n conflicts = key_sheet.loc[nonzero_allocs, key_col].str.len() > 0\n\n # if there is no overlap between new allocations and this column we can fill the values there\n if (~conflicts).all():\n fill_idx = key_col\n break\n\n if fill_idx is None:\n raise KeyError(\"no suitable column to fill without conflicts\")\n\n # insert the new allocations with format <NAME> (<ALLOCATION>)\n key_sheet.loc[nonzero_allocs, fill_idx] = name + df.iloc[\n nonzero_allocs.values, name_idx\n ].apply(lambda x: \"<br>({:.1f})\".format(x))\n\n # remove unused columns\n [\n key_sheet.drop(col, axis=1, inplace=True)\n for col in key_sheet.columns\n if key_sheet[col].str.len().sum() == 0\n ]\n\n # format dates nicely\n if freq == \"MS\":\n key_sheet.index = key_sheet.index.strftime(\"%b-%Y\")\n elif freq == \"W-MON\":\n key_sheet.index = key_sheet.index.strftime(\"%d-%b-%Y\")\n else:\n key_sheet.index = key_sheet.index.strftime(\"%Y-%m-%d\")\n\n # store the allocations - transpose to get rows as keys and columns as dates\n sheet[df.columns.name] = key_sheet.T\n\n # merge everything together into one large dataframe, sorted by key\n sheet = pd.concat(sheet).sort_index()\n\n if key_type == \"project\":\n\n # 
Get project client names\n proj_idx = [\n self.get_project_id(name) for name in sheet.index.get_level_values(0)\n ]\n client_idx = self.projects.loc[proj_idx, \"client\"]\n client_name = self.clients.loc[client_idx, \"name\"]\n\n # Add project client info to index (~programme area)\n sheet[\"client_name\"] = client_name.values\n self._name_whiteboard_index(sheet, \"client_name\", \"project_name\")\n # Move REG/Turing support projects to end\n clients = client_name.unique()\n reg = [client for client in clients if \"REG\" in client]\n reg.append(\"Corporate Duties\")\n reg.append(\"Turing Service Areas\")\n reg.append(\"Turing Programme Support\")\n reg = sorted(reg)\n\n others = sorted([client for client in clients if client not in reg])\n sheet = sheet.reindex(others + reg, level=0)\n\n # Remove index headings\n sheet.index.rename([None, None, None], inplace=True)\n\n # Get GitHub issue numbers, add as hrefs\n proj_names = sheet.index.levels[1].values\n proj_idx = [self.get_project_id(name) for name in proj_names]\n proj_gitissue = [self.projects.loc[idx, \"github\"] for idx in proj_idx]\n git_base_url = \"https://github.com/alan-turing-institute/Hut23/issues\"\n\n proj_names_with_url = {\n proj: \"\"\"<a href=\"{url}/{issue}\">{proj}<br>[GitHub: #{issue}]</a>\"\"\".format(\n url=git_base_url, issue=int(proj_gitissue[idx]), proj=proj\n )\n for idx, proj in enumerate(proj_names)\n if not np.isnan(proj_gitissue[idx])\n }\n\n sheet.rename(proj_names_with_url, axis=\"index\", level=1, inplace=True)\n\n elif key_type == \"person\":\n # Get person association group\n person_idx = [\n self.get_person_id(name) for name in sheet.index.get_level_values(0)\n ]\n\n assoc_idx = [self.people.loc[idx, \"association\"] for idx in person_idx]\n\n group_name = [self.associations.loc[idx, \"name\"] for idx in assoc_idx]\n\n # Add project client info to index (~programme area)\n sheet[\"group_name\"] = group_name\n self._name_whiteboard_index(sheet, \"group_name\", \"person_name\")\n sheet = sheet.reindex(\n [\n \"REG Director\",\n \"REG Principal\",\n \"REG Senior\",\n \"REG Standard\",\n \"REG Junior\",\n \"REG Associate\",\n \"University Partner\",\n \"Placeholder\",\n ],\n level=0,\n )\n\n sheet.index.rename([None, None, None], inplace=True)\n\n return sheet\n\n def _name_whiteboard_index(self, sheet, index_col, group_name):\n sheet.set_index([index_col, sheet.index], inplace=True)\n sheet.index.rename(group_name, 1, inplace=True)\n sheet.index.rename(\"row\", 2, inplace=True)\n\n sheet.sort_values(by=[index_col, group_name, \"row\"], inplace=True)\n\n def _get_allocations(self, id_column):\n \"\"\"For each unique value in id_column, create a dataframe where\n the rows are dates, the columns are projects/people depending on\n id_column, and the values are time allocations for that date.\n id_column can be 'person' or 'project'.\"\"\"\n if id_column == \"person\":\n grouped_allocations = self.assignments.groupby(\n [\"person\", \"project\", \"start_date\", \"end_date\"]\n ).allocation.sum()\n id_values = self.people.index\n ref_column = \"project\"\n\n elif id_column == \"project\":\n grouped_allocations = self.assignments.groupby(\n [\"project\", \"person\", \"start_date\", \"end_date\"]\n ).allocation.sum()\n id_values = self.projects.index\n ref_column = \"person\"\n\n else:\n raise ValueError(\"id_column must be person or project\")\n\n allocations = {}\n\n for idx in id_values:\n # check whether the this id has any assignments, i.e. 
whether the id\n # exists in the index (get_level_values to deal with MultiIndex)\n if idx in grouped_allocations.index.get_level_values(0):\n # get the allocations\n id_allocs = grouped_allocations.loc[idx]\n\n # unstack the MultiIndex\n id_allocs = id_allocs.reset_index()\n\n # Initialise dataframe to store results\n id_alloc_days = pd.DataFrame(\n index=self.date_range_workdays,\n columns=id_allocs[ref_column].unique(),\n )\n id_alloc_days.fillna(0, inplace=True)\n\n # Loop over each assignment\n for _, row in id_allocs.iterrows():\n # Create the range of business days that this assignment\n # corresponds to\n dates = get_business_days(row[\"start_date\"], row[\"end_date\"])\n\n # Add the allocation to the corresponding project for the\n # range of dates.\n id_alloc_days.loc[dates, row[ref_column]] += row[\"allocation\"]\n\n else:\n # no projects, just make an empty dataframe\n id_alloc_days = pd.DataFrame(index=self.date_range_workdays)\n\n # Add the person's name as a label - just nice for printing later.\n id_alloc_days.columns.name = self.get_name(idx, id_column)\n\n allocations[idx] = id_alloc_days\n\n # total assignment each day\n totals = pd.DataFrame(index=self.date_range_workdays, columns=id_values)\n unavail_client = self.get_client_id(\"UNAVAILABLE\")\n unavail_projects = self.get_client_projects(unavail_client)\n for idx in allocations:\n if ref_column == \"project\":\n # don't include unavailable project in totals\n alloc_wo_unavail = allocations[idx].drop(\n [\n proj\n for proj in allocations[idx].columns\n if proj in unavail_projects\n ],\n axis=1,\n )\n totals[idx] = alloc_wo_unavail.sum(axis=1)\n else:\n totals[idx] = allocations[idx].sum(axis=1)\n\n return allocations, totals\n\n def _get_project_unconfirmed(self):\n \"\"\"Get unconfirmed project requirements\"\"\"\n\n unconf_idx = self.get_person_id(\"UNCONFIRMED\")\n\n project_unconfirmed = pd.DataFrame(\n 0, index=self.date_range_workdays, columns=self.projects.index\n )\n\n allocs = self.people_allocations[unconf_idx]\n\n for project in allocs.columns:\n project_unconfirmed[project] += allocs[project]\n\n return project_unconfirmed\n\n def _get_project_deferred(self):\n \"\"\"Get deferred project allocations\"\"\"\n\n defer_idx = self.get_person_id(\"DEFERRED\")\n\n project_deferred = pd.DataFrame(\n 0, index=self.date_range_workdays, columns=self.projects.index\n )\n\n allocs = self.people_allocations[defer_idx]\n\n for project in allocs.columns:\n project_deferred[project] += allocs[project]\n\n return project_deferred\n\n def _get_project_notfunded(self):\n \"\"\"Get not funded project allocations\"\"\"\n\n notfunded_idx = self.get_person_id(\"NOT FUNDED\")\n\n project_notfunded = pd.DataFrame(\n 0, index=self.date_range_workdays, columns=self.projects.index\n )\n\n allocs = self.people_allocations[notfunded_idx]\n\n for project in allocs.columns:\n project_notfunded[project] += allocs[project]\n\n return project_notfunded\n\n def _get_project_required(self):\n \"\"\"Get people required (i.e. 
needs someone assigned)\n for all projects.\"\"\"\n\n peoplereq_idx = self.get_person_id(\"PEOPLE REQUIRED\")\n\n project_peoplereq = pd.DataFrame(\n 0, index=self.date_range_workdays, columns=self.projects.index\n )\n\n allocs = self.people_allocations[peoplereq_idx]\n\n for project in allocs.columns:\n project_peoplereq[project] += allocs[project]\n\n return project_peoplereq\n\n def _get_tracking(self, id_column, ref_column):\n \"\"\"For each unique value in id_column, create a dataframe where the rows are dates,\n the columns are projects/people/clients/tasks depending on id_column, and the values are\n tracked time for each project/person/client/task for each date.\n id_column can be 'person', 'project', 'client', or 'task'\n ref_column can be 'person', 'project', 'client', 'task' or 'TOTAL' but must not be same as id_column.\"\"\"\n\n if ref_column == id_column:\n raise ValueError(\"id_column and ref_column must be different.\")\n\n # id column\n if id_column == \"person\":\n id_values = self.people.index\n\n elif id_column == \"project\":\n id_values = self.projects.index\n\n elif id_column == \"client\":\n id_values = self.clients.index\n\n elif id_column == \"task\":\n id_values = self.tasks.index\n else:\n raise ValueError(\"id_column must be person, project, client or task\")\n\n # ref_column\n if ref_column not in [\"person\", \"project\", \"client\", \"task\", \"TOTAL\"]:\n raise ValueError(\n \"\"\"id_column must be person, project, client,\n task or TOTAL\"\"\"\n )\n\n # group time_entries by id_column, ref_column and date\n if ref_column == \"TOTAL\":\n grouped_entries = self.time_entries.groupby([id_column, \"date\"]).hours.sum()\n else:\n grouped_entries = self.time_entries.groupby(\n [id_column, ref_column, \"date\"]\n ).hours.sum()\n\n # populate the entries dict from grouped_entries\n # entries is a dict with id_column values as keys and the items being a dataframe with ref_column as the index\n entries = {}\n\n for idx in id_values:\n # check whether the this id has any time entries, i.e. 
whether the id\n # exists in the index (get_level_values to deal with MultiIndex)\n if idx in grouped_entries.index.get_level_values(0):\n # get the allocations\n id_entries = grouped_entries.loc[idx]\n\n # unstack the MultiIndex\n id_entries = id_entries.reset_index()\n\n # Initialise dataframe to store results\n if ref_column == \"TOTAL\":\n id_entry_days = pd.Series(index=self.date_range_alldays)\n else:\n id_entry_days = pd.DataFrame(\n index=self.date_range_alldays,\n columns=id_entries[ref_column].unique(),\n )\n\n id_entry_days.fillna(0, inplace=True)\n\n # Loop over each time entry\n for _, row in id_entries.iterrows():\n if ref_column == \"TOTAL\":\n id_entry_days.loc[row[\"date\"]] += row[\"hours\"]\n else:\n id_entry_days.loc[row[\"date\"], row[ref_column]] += row[\"hours\"]\n\n elif ref_column == \"TOTAL\":\n id_entry_days = pd.Series(index=self.date_range_alldays).fillna(0)\n else:\n id_entry_days = pd.DataFrame(index=self.date_range_alldays)\n\n # Add the person's name as a label - just nice for printing later.\n if ref_column != \"TOTAL\":\n id_entry_days.columns.name = self.get_name(idx, id_column)\n\n entries[idx] = id_entry_days\n\n if ref_column == \"TOTAL\":\n entries = pd.DataFrame(entries)\n\n return entries\n\n def _client_from_project_tracking(self, tracking):\n \"\"\"Group previously calculated project tracking values by client.\n\n Arguments:\n tracking {pd.DataFrame or dict} -- a single dataframe or a dict of\n dataframes containing project ids as columns.\n\n Raises:\n TypeError: if tracking is not an instance of pd.DataFrame or dict.\n\n Returns:\n pd.DataFrame or dict -- same format as tracking except with columns\n now being client ids.\n \"\"\"\n if isinstance(tracking, pd.DataFrame):\n grouped_df = self._sum_tracking_by_client(tracking)\n return grouped_df\n\n elif isinstance(tracking, dict):\n grouped_dict = {}\n for idx, df in tracking.items():\n grouped_df = self._sum_tracking_by_client(df)\n grouped_dict[idx] = grouped_df\n\n return grouped_dict\n else:\n raise TypeError(\"tracking must be dataframe or dict of dataframes\")\n\n def _sum_tracking_by_client(self, df):\n result = df.copy(deep=True)\n result.columns = [self.projects.loc[col, \"client\"] for col in result.columns]\n result = result.groupby(result.columns, axis=1).sum()\n return result\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.Series",
"numpy.isnan",
"pandas.DataFrame",
"pandas.date_range",
"pandas.tseries.offsets.BDay"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
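The business-day helper in the `wimbledon/wimbledon.py` record above combines `pd.tseries.offsets.BDay` with the `holidays` package. A small, self-contained sketch of the same pattern — the date range is made up for illustration:

```python
import holidays
import pandas as pd

# Weekdays between two dates...
rng = pd.date_range("2019-12-23", "2020-01-03", freq=pd.tseries.offsets.BDay())
# ...minus English public holidays (here: 25/26 Dec and 1 Jan).
pub_hols = holidays.England()
business_days = pd.to_datetime([d for d in rng if d not in pub_hols])
print(business_days)
```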
CyrilCadoux/dsp-labs | [
"8ef53fccb87ad842051d9032d127a86c1172155f"
] | [
"scripts/filter_design/iir_comparison.py"
] | [
"\"\"\"\nCompare various IIR filters\n\"\"\"\n\nimport numpy as np\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n\ndef freq2rad(freq, fs):\n return freq * np.pi / (fs/2)\n\n\ndef rad2freq(rad, fs):\n return rad * (fs/2) / np.pi\n\n\n# MAIN PARAMETER\npole_coef = 0.95\nfs = 16000\n\n# prepare figure\nALPHA = 0.8\nf_max = 4000\nplt.figure()\n\n# simple filter\nb = np.array([1, -1])\nw, h = signal.freqz(b)\nplt.semilogx([rad2freq(rad, fs) for rad in w],\n 20 * np.log10(abs(h)),\n label=\"simple (2-tap)\",\n alpha=ALPHA)\n\n# First order single pole\nb = np.array([1., -1.])\na = np.array([1, -1*pole_coef])\n\nw, h = signal.freqz(b, a)\nplt.semilogx([rad2freq(rad, fs) for rad in w],\n 20 * np.log10(abs(h)),\n label=\"1-stage\",\n alpha=ALPHA)\n\n# (2nd order)\nb = np.array([1., -2., 1.])\na = np.array([1, -2*pole_coef, pole_coef*pole_coef])\n\nw, h = signal.freqz(b, a)\nplt.semilogx([rad2freq(rad, fs) for rad in w],\n 20 * np.log10(abs(h)),\n label=\"2-stage\",\n alpha=ALPHA)\n\n# (3rd order)\nb = np.array([1., -3., 3., -1.])\na = np.array([1, -3*pole_coef, 3*pole_coef*pole_coef, -1*pole_coef**3])\n\nw, h = signal.freqz(b, a)\nplt.semilogx([rad2freq(rad, fs) for rad in w],\n 20 * np.log10(abs(h)),\n label=\"3-stage\",\n alpha=ALPHA)\n\n\nplt.margins(0, 0.1)\nplt.title(\"Frequency response for varying num. of stages (log scale)\")\nplt.xlabel(\"Frequency [Hz]\")\nplt.ylabel(\"Magnitude [dB]\")\nplt.grid()\nplt.legend(loc=\"lower right\")\nplt.tight_layout()\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"scipy.signal.freqz",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
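The `iir_comparison.py` record above compares a plain two-tap differencer against leaky one-, two-, and three-pole variants. A quick standalone check that the "1-stage" filter H(z) = (1 - z^-1) / (1 - p*z^-1) rejects DC while passing high frequencies near unity gain:

```python
import numpy as np
from scipy import signal

p = 0.95  # pole_coef from the script
w, h = signal.freqz([1.0, -1.0], [1.0, -p], worN=512)

print(abs(h[0]))   # 0.0 at DC: the zero at z = 1 removes any constant offset
print(abs(h[-1]))  # ~2 / (1 + p) ~= 1.026 approaching Nyquist
```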
virajmehta/differentiable_grasp_quality | [
"3d22a5420ceac7bdd9104670f56b755787a677e6"
] | [
"src/gq_gradient_descent.py"
] | [
"import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom tf_grasp_quality import grasp_quality\nfrom ipdb import set_trace as db\n\nkEpsilon = 1e-5\nkMaxIterations = 1000 #probably way too big\nkLearningRate = 0.1\noutput_grasp_dir = 'test_data/outputs/'\nlog_dir = output_grasp_dir + 'logdir/'\n\n\ndef gq_gradient_descent(vertices_data, triangles_data, normals_data,\n all_grasps):\n all_out_grasps = np.zeros((0, kMaxIterations + 1, 6))\n in_qualities = []\n out_qualities = []\n for g in range(len(all_grasps)):\n out_grasps = np.zeros((kMaxIterations+ 1, 6))\n grasps_data = all_grasps[g:g+1, :]\n out_grasps[0, :] = grasps_data[0,:]\n grasps_data = np.expand_dims(grasps_data, 0)\n with tf.Session() as sess:\n # build input variables\n grasps_init = tf.constant(grasps_data, dtype=tf.float32)\n grasps = tf.get_variable('grasps', dtype=tf.float32,\n initializer=grasps_init)\n triangles = tf.placeholder(tf.int32, shape=(1,\n triangles_data.shape[1], 3), name='triangles')\n vertices = tf.placeholder(tf.float32, shape=(1,\n vertices_data.shape[1], 3), name='vertices')\n normals = tf.placeholder(tf.float32, shape=(1,\n vertices_data.shape[1], 3), name='normals')\n quality = grasp_quality(grasps, vertices, triangles, normals)\n\n # setup tf optimization\n train_step = tf.train.AdagradOptimizer(kLearningRate).minimize(quality)\n #train_step = tf.train.GradientDescentOptimizer(kLearningRate).minimize(quality)\n\n # setup loss logging\n quality_log = tf.squeeze(quality)\n summary = tf.summary.scalar('quality_log', quality_log)\n summary = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(log_dir)\n\n # get ready to go\n original_score = None\n tf.global_variables_initializer().run()\n min_loss = 17 # higher than worst\n min_loss_index = -1\n for i in range(kMaxIterations):\n feed_dict = {triangles: triangles_data, vertices: vertices_data,\n normals: normals_data}\n grasps_out, quality_out, _, summary_out = sess.run([grasps,\n quality, train_step, summary], feed_dict=feed_dict)\n train_writer.add_summary(summary_out, i)\n if quality_out[0,0] < min_loss:\n min_loss = quality_out[0,0]\n min_loss_index = i\n if i % 10 == 0:\n print('descent step %d loss: %f'%(i, quality_out[0,0]))\n out_grasps[i+1,:] = grasps_out[0,0,:]\n if quality_out[0,0] < kEpsilon:\n print('descent step %d loss: %f'%(i, quality_out[0,0]))\n out_grasps[-1,:] = grasps_out[0,0,:]\n out_qualities.append(quality_out[0,0])\n break\n if i == 0:\n in_qualities.append(quality_out[0,0])\n original_score = quality_out[0,0]\n if i == kMaxIterations - 1:\n out_grasps[min_loss_index + 1:-1] = 0\n out_grasps[-1,:] = out_grasps[min_loss_index, :]\n out_qualities.append(min_loss)\n train_writer.close()\n print('grasp: ', g)\n print('original_score', original_score)\n print('final_score', min_loss)\n out_grasps = np.expand_dims(out_grasps, 0)\n all_out_grasps = np.concatenate((all_out_grasps, out_grasps))\n tf.reset_default_graph()\n return all_out_grasps, in_qualities, out_qualities \n\ndef load_input(name):\n vertices_path = 'test_data/%s_vertices.npy'%name\n triangles_path = 'test_data/%s_triangles.npy'%name\n normals_path = 'test_data/%s_normals.npy'%name\n grasps_path = 'test_data/%s_grasps.npy'%name\n vertices_data = np.load(vertices_path)\n num_vertices = vertices_data.shape[0]\n triangles_data = np.load(triangles_path)\n triangles_data = triangles_data.astype(int)\n num_triangles = triangles_data.shape[0]\n normals_data = np.load(normals_path)\n all_grasps = np.load(grasps_path)\n return vertices_data, 
triangles_data, normals_data, all_grasps\n\n\ndef process_input(vertices_data, triangles_data, normals_data, grasps_data):\n centroid = np.mean(vertices_data, axis=0, keepdims=True)\n vertices_data -= centroid\n centroid2 = np.array([centroid[0,i] for i in range(3) for _ in range(2)])\n centroid2 = np.expand_dims(centroid2, 0)\n grasps_data -= centroid2\n max_norm = np.max(np.linalg.norm(vertices_data, axis=1))\n vertices_data /= max_norm\n grasps_data /= max_norm\n vertices = np.expand_dims(vertices_data, 0)\n triangles = np.expand_dims(triangles_data, 0)\n normals = np.expand_dims(normals_data, 0)\n return max_norm, centroid, vertices, triangles, normals, grasps_data\n\ndef process_output(grasps, scale, offset):\n grasps *= scale\n offset = np.array([offset[0,i] for i in range(3) for _ in range(2)])\n for i in range(grasps.shape[0]):\n for j in range(grasps.shape[1]):\n if grasps[i,j,0] != 0 or grasps[i,j,1] != 0 or grasps[i,j,2] != 0\\\n or grasps[i,j,3] != 0 or grasps[i,j,4] != 0 or grasps[i,j,5] != 0:\n grasps[i,j,:] += offset\n return grasps\n\n\ndef save_grasps(grasps, name, cat=\"\"):\n all_grasp_name = os.path.join(output_grasp_dir, cat, '%s_grasps_path'%(name))\n np.save(all_grasp_name, grasps)\n io_grasp_name = os.path.join(output_grasp_dir, cat, '%s_grasps_io'%(name))\n first_grasps = grasps[:,0,:]\n last_grasps = grasps[:,-1,:]\n io_grasps = np.concatenate((first_grasps, last_grasps))\n np.save(io_grasp_name, io_grasps)\n\n\n\nif __name__ == '__main__':\n vertices_data, triangles_data, normals_data, grasps_data = load_input(\n sys.argv[1])\n scale, centroid, vertices, triangles, normals, grasps = process_input(\n vertices_data, triangles_data, normals_data, grasps_data)\n out_grasps_raw, iq, oq = gq_gradient_descent(vertices, triangles, normals, grasps)\n out_grasps_processed = process_output(out_grasps_raw, scale, centroid)\n save_grasps(out_grasps_processed, sys.argv[1])\n"
] | [
[
"tensorflow.get_variable",
"numpy.expand_dims",
"tensorflow.constant",
"tensorflow.summary.scalar",
"tensorflow.summary.FileWriter",
"tensorflow.train.AdagradOptimizer",
"numpy.linalg.norm",
"numpy.save",
"tensorflow.placeholder",
"numpy.concatenate",
"tensorflow.squeeze",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"numpy.mean",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"numpy.load",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
liuwell/srat | [
"7ee7e69bac1aaef70ba436e6c002c102bb4a6fc7"
] | [
"srat/corr_heatmap.py"
] | [
"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport argparse\n\n\n# Reference\n# Ref links: https://matplotlib.org/gallery/images_contours_and_fields/image_annotated_heatmap.html\n\n# The usage of the following functions and methods is shown in this script:\n# matplotlib.axes.Axes.imshow\n# matplotlib.pyplot.imshow\n# matplotlib.figure.Figure.colorbar\n# matplotlib.pyplot.colorbar\n\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\")\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=-30, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=2)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=(\"black\", \"white\"),\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A pair of colors. The first is used for values below a threshold,\n the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. 
Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n texts.append(text)\n\n return texts\n\n\n# We can nicely plot a correlation matrix. Since this is bound by -1 and 1,\n# we use those as vmin and vmax. We may also remove leading zeros and hide\n# the diagonal elements (which are all 1) by using a\n# `matplotlib.ticker.FuncFormatter`.\n\n# Test data\n'''\nvegetables = [\"cucumber\", \"tomato\", \"lettuce\", \"asparagus\",\n \"potato\", \"wheat\", \"barley\"]\nfarmers = [\"Farmer Joe\", \"Upland Bros.\", \"Smith Gardening\",\n \"Agrifun\", \"Organiculture\", \"BioGoods Ltd.\", \"Cornylee Corp.\"]\n\nharvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],\n [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],\n [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],\n [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],\n [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],\n [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],\n [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])\n\ncorr_matrix = np.corrcoef(harvest)\nim, _ = heatmap(corr_matrix, vegetables, vegetables,\n cmap=\"RdBu\", vmin=-1, vmax=1,\n cbarlabel=\"correlation coeff.\")\n\n\ndef func(x, pos):\n return \"{:.2f}\".format(x).replace(\"0.\", \".\").replace(\"1.00\", \"\")\n\n#annotate_heatmap(im, valfmt=matplotlib.ticker.FuncFormatter(func), size=7)\nannotate_heatmap(im, size=7)\n\nplt.tight_layout()\nplt.savefig(\"test.pdf\")\nplt.close()\n'''\nimport seaborn as sns\ndef corr_heatmap(fi):\n\tprefix = fi.split('.')[0]\n\t\n\tdf = pd.read_csv(fi, sep=\"\\t\", header=0, index_col=0)\n\tdfCorr = df.corr()\n\t#print(pd.read_csv(fi, sep='\\t',header=0,index_col=0))\n\tnames = list(dfCorr.index)\n\t#print(names)\n\tim, _ = heatmap(dfCorr, names, names,\n\t cmap=\"RdBu\", vmin=0, vmax=1,\n\t cbarlabel=\"correlation coeff.\")\n\t\n\tannotate_heatmap(im, size=7)\n\t\n\tplt.tight_layout()\n\tfo1 = prefix + '_cor_heatmap.pdf'\n\tplt.savefig(fo1)\n\tplt.close()\n\n\t###\n\tsns.pairplot(np.log2(df+1))\n\tfo2 = prefix + '_matrix_point.pdf'\n\tplt.savefig(fo2)\n\tplt.close()\n\n\tprint(\"\\n# Finished the correlation heatmap\")\n\tprint(\"\\n# Input file: %s\" % fi)\n\tprint(\"\\n# Output figure: %s, %s\\n\" % (fo1, fo2))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='The correlation and heatmap of expression data, such as miRNA expression, gene expression')\n\n\tparser.add_argument('-i', '--input', required=True, help='the input data')\n\t#parser.add_argument('-o', '--output', required=True, help='the output figure')\n\n\targs = parser.parse_args()\n\n\tcorr_heatmap(args.input)\n\n\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"matplotlib.ticker.StrMethodFormatter",
"numpy.log2",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
bdmckean/woot_math_analysis | [
"d3308e413da2e41fff5060f10339b5ab4d01d690"
] | [
"working/EDA_WM-BrianMc.py"
] | [
"\n# coding: utf-8\n\n# In[1]:\n\nimport pymongo\nimport pandas as pd\nimport numpy as np\n\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\nimport datetime\n\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\n\n\nget_ipython().magic(u'matplotlib inline')\nimport json\nplt.style.use('ggplot')\n\nimport seaborn as sns\n\n\n# In[2]:\n\n## Connect to local DB\n\nclient = MongoClient('localhost', 27017)\nprint (\"Setup db access\")\n\n\n# In[3]:\n\n#\n# Get collections from mongodb\n#\ndb = client.my_test_db\nreponses = db.anon_student_task_responses.find()\n\n\n# In[4]:\n\ndf_responses = pd.DataFrame(list(reponses))\n\n\n# In[5]:\n\nprint (df_responses.head())\n\n\n# In[6]:\n\n## Look act columns\nprint (df_responses.columns)\n\n\n# In[7]:\n\n## How many data samples\nprint (len(df_responses), \"Number of entries\")\n\n\n# In[8]:\n\n## Example data samle\nprint (df_responses.iloc[1])\n\n\n# In[9]:\n\nprint (\"Number of unique lessions\", len(df_responses['lesson'].unique()) )\nprint (\"Unique lessions\", df_responses['lesson'].unique())\n\n\n# In[10]:\n\nprint (\"Samples of each lesson\",df_responses['lesson'].value_counts())\n\n\n# In[11]:\n\nprint (\"Summary sample :\", df_responses['level_summary'][0])\n\n\n# In[12]:\n\n## Promote student info, level summary, level summary problem results\n\n\n# In[13]:\n\ndf2 = df_responses.join(pd.DataFrame(df_responses[\"student\"].to_dict()).T)\n\n\n# In[14]:\n\ndf2 = df2.join(pd.DataFrame(df2['level_summary'].to_dict()).T)\n\n\n# In[15]:\n\ndf2 = df2.join(pd.DataFrame(df2['problems'].to_dict()).T)\n\n\n# In[16]:\n\ndf_student1 = df2.groupby('student_id').agg({ 'lesson':[len, pd.Series.nunique ], 'ntotal':sum, 'nright':sum })\n\n\n# In[17]:\n\ndf_student1['percent_correct'] = df_student1['nright']['sum'].astype(float) / df_student1['ntotal']['sum']\n\n\n# In[18]:\n\ndf_student1\n\n\n# In[19]:\n\ny1 = np.array(df_student1['lesson']['len'])\n\n\n# In[ ]:\n\n\n\n\n# In[20]:\n\n# Total Lessons per student\n\nplt.title(' Number of students performing number lessons ')\nplt.xlabel('Number of total lessons attempted')\nplt.ylabel('Number of students')\nplt.hist(y1, bins=40)\n\n\n\n\n# In[21]:\n\n# Uniqe students per # of unique lessons\ny2 = np.array(df_student1['lesson']['nunique'])\nplt.title(' Number of students performing lesson types')\nplt.xlabel('Number of unique lessons attempted')\nplt.ylabel('Number of students')\nplt.hist(y2, bins=40)\n\n\n# In[ ]:\n\n\n\n\n# In[22]:\n\n# Uniqe students per # of unique lessons\ny3 = np.array(df_student1['percent_correct'])\nplt.title(' Number of students vs problems answered correctly')\nplt.xlabel('Percent of problems answered correctly')\nplt.ylabel('Number of students')\nplt.hist(y3, bins = 40)\n\n\n# In[24]:\n\ndf2.columns\n\n\n# In[25]:\n\ndf2['subject'].unique()\n\n\n# In[26]:\n\ndf2.iloc[0]\n\n\n# In[27]:\n\ndf2.iloc[0]['response']\n\n\n# In[28]:\n\ndf_lesson1 = df2.groupby('lesson').agg({ 'student_id':[len, pd.Series.nunique ], 'ntotal':sum, 'nright':sum })\n\n\n# In[29]:\n\ndf_lesson1['percent_correct'] = df_lesson1['nright']['sum'].astype(float) / df_lesson1['ntotal']['sum']\n\n\n# In[30]:\n\n# Lessons and answers\ny3 = np.array(df_lesson1['percent_correct'])\nplt.title(' Lessons: percent of correct answers per lesson histogram')\nplt.xlabel('Percent of problems answered correctly')\nplt.ylabel('Number of lessons')\nplt.hist(y3, bins = 40)\n\n\n# In[34]:\n\ndf_lesson1\n\n\n# In[36]:\n\ndf_lesson1.sort_values('percent_correct')\n\n\n# In[ ]:\n\n\n\n\n# 
In[37]:\n\ndf3 = df2.copy()\n\n\n# In[38]:\n\ndf3['percent_correct'] = df3['nright'].astype(float) / df3['ntotal']\n\n\n# In[ ]:\n\n## Make 'description' a feature wih important words mapped\n\n\n# In[47]:\n\ndf3.columns\n\n\n# In[50]:\n\ndf3.iloc[0]\n\n\n# In[51]:\n\ndf3.iloc[0]['txt']\n\n\n# In[52]:\n\ndf3.iloc[0]['description']\n\n\n# In[69]:\n\nfor idx in range(100):\n print (df3.iloc[idx]['lesson'])\n print (df3.iloc[idx]['response'])\n\n\n# In[100]:\n\nmy_val = (str(df3.iloc[0]['response']))\nmy_val = my_val.replace(\"': \",\"_\")\nmy_val = my_val.replace(\"_{\",\" \")\nmy_val = my_val.replace(\"_[\",\", \")\nfor c in [']','[','{','}',\"'\",\"\"]:\n my_val = my_val.replace(c,'')\n\n\n# In[101]:\n\nmy_val\n\n\n# In[95]:\n\nstr(df3.iloc[0]['response'])\n\n\n# In[124]:\n\ndef stringify_response(resp):\n my_val = str(resp).replace(\"': \",\"_\")\n my_val = my_val.replace(\"_{\",\" \")\n my_val = my_val.replace(\"_[\",\", \")\n for c in [']','[','{','}',\"'\",\"\",\",\"]:\n my_val = my_val.replace(c,'')\n return my_val\n\n\n# In[125]:\n\nstringify_response(df3.iloc[0]['response'])\n\n\n# In[126]:\n\ndf3['response_str'] = df3['response'].apply(stringify_response)\n\n\n# In[127]:\n\nfor idx in range(20):\n print (idx, df3['response_str'].iloc[idx])\n\n\n# In[129]:\n\ndf3.columns\n\n\n# In[131]:\n\ndf3.to_csv('data_frame_with_string_response.csv')\n\n\n# In[132]:\n\ndf_lesson1.to_csv('lesson_summary.csv')\n\n\n# In[ ]:\n\n\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gvellut/rastachimp | [
"9c3e58e80f95d73322b0f8b323cd341574451373"
] | [
"tests/test_rastachimp.py"
] | [
"import fiona\nimport numpy as np\nfrom pytest import approx\nfrom rasterio import features\nfrom shapely.geometry import LineString, mapping\n\nfrom rastachimp import as_shapely, simplify_dp, smooth_chaikin\nfrom rastachimp.rastachimp import _densify_edges, _point_distance\n\n\ndef test_simplify_basic():\n shapes = _sample_data_basic()\n # convert to Shapely geometry\n simpl = list(simplify_dp(as_shapely(shapes), 2, True))\n\n assert 6 == len(simpl)\n\n count = {2: 0, 1: 0, 5: 0}\n for _, value in simpl:\n count[value] += 1\n assert count[2] == 2\n assert count[1] == 3\n assert count[5] == 1\n\n _to_shp(simpl, \"simpl_dp.shp\")\n\n\ndef test_smooth_basic():\n shapes = _sample_data_basic()\n\n simpl = list(simplify_dp(as_shapely(shapes), 0.9, True))\n smooth = list(smooth_chaikin(simpl, 5, True))\n\n assert 6 == len(smooth)\n\n count = {2: 0, 1: 0, 5: 0}\n for _, value in smooth:\n count[value] += 1\n assert count[2] == 2\n assert count[1] == 3\n assert count[5] == 1\n\n _to_shp(smooth, \"smooth_chaikin.shp\")\n\n\ndef test_densify():\n # 12 points\n coords = np.arange(24).reshape(12, 2)\n edge = LineString(coords)\n\n # subdivide once (n=1)\n ds_edges, _, _ = _densify_edges(None, [edge], None, n=1, keep_border=False)\n ds_coords = ds_edges[0].coords\n\n # 2 new segments for each input\n assert len(ds_coords) == 23\n # check that coordinates of input poins are still the same\n for i in range(0, 23, 2):\n assert tuple(ds_coords[i]) == tuple(coords[i // 2])\n\n # check new segment is colinear with the original input\n np_coords = np.array(ds_coords)\n for i in range(1, 23, 2):\n # points at i are new, those at i+1 and i-1 are same as\n # input\n x1, y1 = np_coords[i] - np_coords[i - 1]\n x2, y2 = np_coords[i + 1] - np_coords[i - 1]\n assert x1 * y2 - x2 * y1 == approx(0)\n\n # all segments have the same length ~2.82\n ds_edges, _, _ = _densify_edges(\n None, [edge], max_distance=1, n=None, keep_border=False\n )\n ds_coords = ds_edges[0].coords\n\n # for max_distance=1 => 3 new segments for each input\n assert len(ds_coords) == 34\n dists = _point_distance(ds_coords)\n for i in range(len(dists)):\n assert dists[i] < 1\n\n\ndef _sample_data_basic():\n image = np.array(\n [\n [2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1],\n [5, 5, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1],\n [5, 5, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1],\n [1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1],\n [1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1],\n [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n ],\n dtype=np.uint8,\n )\n\n shapes = features.shapes(image)\n return shapes\n\n\ndef _to_shp(transf, dst):\n meta = {\n \"schema\": {\"geometry\": \"Polygon\", \"properties\": {\"VALUE\": \"int:10\"}},\n \"crs\": \"EPSG:3857\",\n }\n with fiona.open(dst, \"w\", driver=\"Shapefile\", **meta) as sink:\n for s in transf:\n f = {\"geometry\": mapping(s[0]), \"properties\": {\"VALUE\": int(s[1])}}\n sink.write(f)\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sneaxiy/DALI | [
"9692aacc14d43a9e0b2fec3369c4a9266f5eab8b"
] | [
"dali/test/python/test_utils.py"
] | [
"# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport nvidia.dali.types as types\nimport nvidia.dali as dali\nfrom nvidia.dali.backend_impl import TensorListGPU, TensorGPU, TensorListCPU\n\nimport tempfile\nimport subprocess\nimport os\nimport sys\nimport random\nimport re\n\ndef get_dali_extra_path():\n try:\n dali_extra_path = os.environ['DALI_EXTRA_PATH']\n except KeyError:\n print(\"WARNING: DALI_EXTRA_PATH not initialized.\", file=sys.stderr)\n dali_extra_path = \".\"\n return dali_extra_path\n\n\n# those functions import modules on demand to no impose additional dependency on numpy or matplot\n# to test that are using these utilities\nnp = None\nassert_array_equal = None\nassert_allclose = None\ncp = None\n\n\ndef import_numpy():\n global np\n global assert_array_equal\n global assert_allclose\n import numpy as np\n from numpy.testing import assert_array_equal, assert_allclose\n\n\ndef import_cupy():\n global cp\n import cupy as cp\n\n\nImage = None\n\n\ndef import_pil():\n global Image\n from PIL import Image\n\n\ndef save_image(image, file_name):\n import_numpy()\n import_pil()\n if image.dtype == np.float32:\n min = np.min(image)\n max = np.max(image)\n if min >= 0 and max <= 1:\n image = image * 256\n elif min >= -1 and max <= 1:\n image = ((image + 1) * 128)\n elif min >= -128 and max <= 127:\n image = image + 128\n else:\n image = (image - np.iinfo(image.dtype).min) * (255.0 / (np.iinfo(image.dtype).max - np.iinfo(image.dtype).min))\n image = image.astype(np.uint8)\n Image.fromarray(image).save(file_name)\n\n\ndef get_gpu_num():\n sp = subprocess.Popen(['nvidia-smi', '-L'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n out_str = sp.communicate()\n out_list = out_str[0].split('\\n')\n out_list = [elm for elm in out_list if len(elm) > 0]\n return len(out_list)\n\n\n# If the `max_allowed_error` is not None, it's checked instead of comparing mean error with `eps`.\ndef check_batch(\n batch1, batch2, batch_size=None, eps=1e-07, max_allowed_error=None, expected_layout=None,\n compare_layouts=True):\n \"\"\"Compare two batches of data, be it dali TensorList or list of numpy arrays.\n\n Args:\n batch1: input batch\n batch2: input batch\n batch_size: reference batch size - if None, only equality is enforced\n eps (float, optional): Used for mean error validation. Defaults to 1e-07.\n max_allowed_error (int or float, optional): If provided the max diff between elements.\n expected_layout (str, optional): If provided, the batches that are DALI types will be checked\n to match this layout. If None, there will be no check\n compare_layouts (bool, optional): Whether to compare layouts between two batches.\n Checked only if both inputs are DALI types. 
Defaults to True.\n \"\"\"\n\n def is_error(mean_err, max_err, eps, max_allowed_error):\n if max_allowed_error is not None:\n if max_err > max_allowed_error:\n return True\n elif mean_err > eps:\n return True\n return False\n\n import_numpy()\n if isinstance(batch1, dali.backend_impl.TensorListGPU):\n batch1 = batch1.as_cpu()\n if isinstance(batch2, dali.backend_impl.TensorListGPU):\n batch2 = batch2.as_cpu()\n\n if batch_size is None:\n batch_size = len(batch1)\n\n def _verify_batch_size(batch):\n if isinstance(batch, dali.backend.TensorListCPU) or isinstance(batch, list):\n tested_batch_size = len(batch)\n else:\n tested_batch_size = batch.shape[0]\n assert tested_batch_size == batch_size, \\\n \"Incorrect batch size. Expected: {}, actual: {}\".format(batch_size, tested_batch_size)\n\n _verify_batch_size(batch1)\n _verify_batch_size(batch2)\n\n # Check layouts where possible\n for batch in [batch1, batch2]:\n if expected_layout is not None and isinstance(batch, dali.backend.TensorListCPU):\n assert batch.layout() == expected_layout, \\\n 'Unexpected layout, expected \"{}\", got \"{}\".'.format(expected_layout,\n batch.layout())\n\n if compare_layouts and \\\n isinstance(batch1, dali.backend.TensorListCPU) and \\\n isinstance(batch2, dali.backend.TensorListCPU):\n assert batch1.layout() == batch2.layout(), \\\n 'Layout mismatch \"{}\" != \"{}\"'.format(batch1.layout(), batch2.layout())\n\n for i in range(batch_size):\n # This allows us to handle lists of Tensors, lists of np arrays and TensorLists\n left = np.array(batch1[i])\n right = np.array(batch2[i])\n is_failed = False\n assert left.shape == right.shape, \\\n \"Shape mismatch {} != {}\".format(left.shape, right.shape)\n assert left.size == right.size, \\\n \"Size mismatch {} != {}\".format(left.size, right.size)\n if left.size != 0:\n try:\n # abs doesn't handle overflow for uint8, so get minimal value of a-b and b-a\n diff1 = np.abs(left - right)\n diff2 = np.abs(right - left)\n absdiff = np.minimum(diff2, diff1)\n err = np.mean(absdiff)\n max_err = np.max(absdiff)\n min_err = np.min(absdiff)\n total_errors = np.sum(absdiff != 0)\n except Exception:\n # fill in placeholder stats so the error message below can still be built\n err = min_err = max_err = total_errors = None\n absdiff = np.array([])\n is_failed = True\n if is_failed or is_error(err, max_err, eps, max_allowed_error):\n error_msg = (\"Mean error: [{}], Min error: [{}], Max error: [{}]\" +\n \"\\n Total error count: [{}], Tensor size: [{}], Error calculation failed: [{}]\").format(\n err, min_err, max_err, total_errors, absdiff.size, is_failed)\n try:\n save_image(left, \"err_1.png\")\n save_image(right, \"err_2.png\")\n except Exception:\n print(\"Batch at {} can't be saved as an image\".format(i))\n print(left)\n print(right)\n np.save(\"err_1.npy\", left)\n np.save(\"err_2.npy\", right)\n assert False, error_msg\n\ndef compare_pipelines(pipe1, pipe2, batch_size, N_iterations, eps=1e-07, max_allowed_error=None,\n expected_layout=None, compare_layouts=True):\n \"\"\"Compare the outputs of two pipelines across several iterations.\n\n Args:\n pipe1: input pipeline object.\n pipe2: input pipeline object.\n batch_size (int): batch size\n N_iterations (int): Number of iterations used for comparison\n eps (float, optional): Allowed mean error between samples. 
Defaults to 1e-07.\n max_allowed_error (int or float, optional): If provided the max diff between elements.\n expected_layout (str or tuple of str, optional): If provided the outputs of both pipelines\n will be matched with provided layouts and error will be raised if there is mismatch.\n Defaults to None.\n compare_layouts (bool, optional): Whether to compare layouts of outputs between pipelines.\n Defaults to True.\n \"\"\"\n pipe1.build()\n pipe2.build()\n for _ in range(N_iterations):\n out1 = pipe1.run()\n out2 = pipe2.run()\n assert len(out1) == len(out2)\n for i in range(len(out1)):\n out1_data = out1[i].as_cpu() if isinstance(out1[i][0], dali.backend_impl.TensorGPU) \\\n else out1[i]\n out2_data = out2[i].as_cpu() if isinstance(out2[i][0], dali.backend_impl.TensorGPU) \\\n else out2[i]\n if isinstance(expected_layout, tuple):\n current_expected_layout = expected_layout[i]\n else:\n current_expected_layout = expected_layout\n check_batch(out1_data, out2_data, batch_size, eps, max_allowed_error,\n expected_layout=current_expected_layout, compare_layouts=compare_layouts)\n\n\nclass RandomDataIterator(object):\n def __init__(self, batch_size, shape=(10, 600, 800, 3), dtype=None, seed=0):\n import_numpy()\n # to avoid any numpy reference in the interface\n if dtype is None:\n dtype = np.uint8\n self.batch_size = batch_size\n self.test_data = []\n self.np_rng = np.random.default_rng(seed=seed)\n for _ in range(self.batch_size):\n if dtype == np.float32:\n self.test_data.append(\n np.array(self.np_rng.random(shape) * (1.0), dtype=dtype) - 0.5)\n else:\n self.test_data.append(\n np.array(self.np_rng.random(shape) * 255, dtype=dtype))\n\n def __iter__(self):\n self.i = 0\n self.n = self.batch_size\n return self\n\n def __next__(self):\n batch = self.test_data\n self.i = (self.i + 1) % self.n\n return (batch)\n\n next = __next__\n\n\nclass RandomlyShapedDataIterator(object):\n def __init__(\n self, batch_size, min_shape=None, max_shape=(10, 600, 800, 3),\n seed=12345, dtype=None):\n import_numpy()\n # to avoid any numpy reference in the interface\n if dtype is None:\n dtype = np.uint8\n self.batch_size = batch_size\n self.test_data = []\n self.min_shape = min_shape\n self.max_shape = max_shape\n self.dtype = dtype\n self.seed = seed\n self.np_rng = np.random.default_rng(seed=seed)\n self.rng = random.Random(seed)\n\n def __iter__(self):\n self.i = 0\n self.n = self.batch_size\n return self\n\n def __next__(self):\n import_numpy()\n self.test_data = []\n for _ in range(self.batch_size):\n # Scale between 0.5 and 1.0\n if self.min_shape is None:\n shape = [\n int(self.max_shape[dim] * (0.5 + self.rng.random() * 0.5))\n for dim in range(len(self.max_shape))]\n else:\n shape = [self.rng.randint(min_s, max_s)\n for min_s, max_s in zip(self.min_shape, self.max_shape)]\n if self.dtype == np.float32:\n self.test_data.append(\n np.array(self.np_rng.random(shape) * (1.0), dtype=self.dtype) - 0.5)\n else:\n self.test_data.append(\n np.array(self.np_rng.random(shape) * 255, dtype=self.dtype))\n\n batch = self.test_data\n self.i = (self.i + 1) % self.n\n return (batch)\n\n next = __next__\n\n\nclass ConstantDataIterator(object):\n def __init__(self, batch_size, sample_data, dtype):\n import_numpy()\n self.batch_size = batch_size\n self.test_data = []\n for _ in range(self.batch_size):\n self.test_data.append(np.array(sample_data, dtype=dtype))\n\n def __iter__(self):\n self.i = 0\n self.n = self.batch_size\n return self\n\n def __next__(self):\n batch = self.test_data\n self.i = (self.i + 1) % self.n\n 
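# Note: the same pre-built batch object is returned on every call; only the internal counter advances.\n 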
return (batch)\n\n next = __next__\n\n\ndef check_output(outputs, ref_out, ref_is_list_of_outputs=None):\n \"\"\"Checks the outputs of the pipeline.\n\n `outputs`\n return value from pipeline `run`\n `ref_out`\n a batch or tuple of batches\n `ref_is_list_of_outputs`\n only meaningful when there's just one output - if True, ref_out is a one-element\n list containing a single batch for output 0; otherwise ref_out _is_ a batch\n \"\"\"\n import_numpy()\n if ref_is_list_of_outputs is None:\n ref_is_list_of_outputs = len(outputs) > 1\n\n assert(ref_is_list_of_outputs or (len(outputs) == 1))\n\n for idx in range(len(outputs)):\n out = outputs[idx]\n ref = ref_out[idx] if ref_is_list_of_outputs else ref_out\n if isinstance(out, dali.backend_impl.TensorListGPU):\n out = out.as_cpu()\n for i in range(len(out)):\n if not np.array_equal(out[i], ref[i]):\n print(\"Out: \", out.at(i))\n print(\"Ref: \", ref[i])\n assert(np.array_equal(out[i], ref[i]))\n\n\ndef dali_type(t):\n import_numpy()\n if t is None:\n return None\n if t is np.float16:\n return types.FLOAT16\n if t is np.float32:\n return types.FLOAT\n if t is np.uint8:\n return types.UINT8\n if t is np.int8:\n return types.INT8\n if t is np.uint16:\n return types.UINT16\n if t is np.int16:\n return types.INT16\n if t is np.uint32:\n return types.UINT32\n if t is np.int32:\n return types.INT32\n raise TypeError(\"Unsupported type: \" + str(t))\n\n\ndef py_buffer_from_address(address, shape, dtype, gpu=False):\n buff = {'data': (address, False), 'shape': tuple(shape), 'typestr': dtype}\n\n class py_holder(object):\n pass\n\n holder = py_holder()\n holder.__array_interface__ = buff\n holder.__cuda_array_interface__ = buff\n if not gpu:\n import_numpy()\n return np.array(holder, copy=False)\n else:\n import_cupy()\n return cp.asanyarray(holder)\n\n\nclass check_output_pattern():\n def __init__(self, pattern, is_regexp=True):\n self.pattern_ = pattern\n self.is_regexp_ = is_regexp\n\n def __enter__(self):\n self.bucket_out_ = tempfile.TemporaryFile(mode='w+')\n self.bucket_err_ = tempfile.TemporaryFile(mode='w+')\n self.stdout_fileno_ = 1\n self.stderr_fileno_ = 2\n self.old_stdout_ = os.dup(self.stdout_fileno_)\n self.old_stderr_ = os.dup(self.stderr_fileno_)\n os.dup2(self.bucket_out_.fileno(), self.stdout_fileno_)\n os.dup2(self.bucket_err_.fileno(), self.stderr_fileno_)\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.bucket_out_.seek(0)\n self.bucket_err_.seek(0)\n os.dup2(self.old_stdout_, self.stdout_fileno_)\n os.dup2(self.old_stderr_, self.stderr_fileno_)\n our_data = self.bucket_out_.read()\n err_data = self.bucket_err_.read()\n\n pattern_found = False\n if self.is_regexp_:\n pattern = re.compile(self.pattern_)\n pattern_found = pattern.search(our_data) or pattern.search(err_data)\n else:\n pattern_found = self.pattern_ in our_data or self.pattern_ in err_data\n\n assert pattern_found, \"Pattern: ``{}`` \\n not found in out: \\n``{}`` \\n and in err: \\n ```{}```\".format(\n self.pattern_, our_data, err_data)\n\n\ndef dali_type_to_np(type):\n import_numpy()\n\n dali_types_to_np_dict = {\n types.BOOL: np.bool_,\n types.INT8: np.int8,\n types.INT16: np.int16,\n types.INT32: np.int32,\n types.INT64: np.int64,\n types.UINT8: np.uint8,\n types.UINT16: np.uint16,\n types.UINT32: np.uint32,\n types.UINT64: np.uint64,\n types.FLOAT16: np.float16,\n types.FLOAT: np.float32,\n types.FLOAT64: np.float64,\n }\n return dali_types_to_np_dict[type]\n\n\ndef np_type_to_dali(type):\n import_numpy()\n\n np_types_to_dali_dict = {\n 
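# Inverse of the mapping in dali_type_to_np above: NumPy scalar types to DALI DataType enum values.\n 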
np.bool_: types.BOOL,\n np.int8: types.INT8,\n np.int16: types.INT16,\n np.int32: types.INT32,\n np.int64: types.INT64,\n np.uint8: types.UINT8,\n np.uint16: types.UINT16,\n np.uint32: types.UINT32,\n np.uint64: types.UINT64,\n np.float16: types.FLOAT16,\n np.float32: types.FLOAT,\n np.float64: types.FLOAT64,\n }\n return np_types_to_dali_dict[type]\n\n\ndef read_file_bin(filename):\n \"\"\"\n Read file as bytes and insert it into numpy array\n :param filename: path to the file\n :return: numpy array\n \"\"\"\n import_numpy()\n return np.fromfile(filename, dtype='uint8')\n\n\ndef filter_files(dirpath, suffix):\n \"\"\"\n Read all file names recursively from a directory and filter those, which end with given suffix\n :param dirpath: Path to directory, from which the file names will be read\n :param suffix: String, which will be used to filter the files\n :return: List of file names\n \"\"\"\n fnames = []\n for dir_name, subdir_list, file_list in os.walk(dirpath):\n flist = filter(lambda fname: fname.endswith(suffix), file_list)\n flist = map(lambda fname: os.path.join(dir_name, fname), flist)\n fnames.extend(flist)\n return fnames\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.avg_last_n = 0\n self.max_val = 0\n\n def update(self, val, n=1):\n self.val = val\n self.max_val = max(self.max_val, val)\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef to_array(dali_out):\n import_numpy()\n if isinstance(dali_out, (TensorGPU, TensorListGPU)):\n dali_out = dali_out.as_cpu()\n if isinstance(dali_out, TensorListCPU):\n dali_out = dali_out.as_array()\n return np.array(dali_out)\n\ndef module_functions(cls, prefix = \"\", remove_prefix = \"\"):\n res = []\n if len(cls.__dict__.keys()) == 0:\n prefix = prefix.replace(remove_prefix, \"\")\n prefix = prefix.lstrip('.')\n if len(prefix):\n prefix += '.'\n else:\n prefix = \"\"\n res.append(prefix + cls.__name__)\n else:\n for c in cls.__dict__.keys():\n if not c.startswith(\"_\") and c not in sys.builtin_module_names:\n c = cls.__dict__[c]\n res += module_functions(c, cls.__name__, remove_prefix = remove_prefix)\n return res\n\ndef get_files(path, ext):\n full_path = os.path.join(get_dali_extra_path(), path)\n audio_files = [\n os.path.join(full_path, f) for f in os.listdir(full_path) \\\n if re.match(f\".*\\.{ext}\", f) is not None\n ]\n return audio_files\n\n\ndef _test_skipped(reason=None):\n print(\"Test skipped.\" if reason is None else f\"Test skipped: {reason}\")\n\n\ndef restrict_python_version(major, minor=None):\n\n def decorator(test_case):\n version_info = sys.version_info\n if version_info.major > major or \\\n (version_info.major == major and (minor is None or version_info.minor >= minor)):\n return test_case\n return lambda: _test_skipped(f\"Insufficient Python version {version_info.major}.{version_info.minor} - required {major}.{minor}\")\n\n return decorator\n"
] | [
[
"numpy.fromfile",
"numpy.minimum",
"numpy.abs",
"numpy.array_equal",
"numpy.min",
"numpy.save",
"numpy.max",
"numpy.mean",
"numpy.iinfo",
"numpy.array",
"numpy.sum",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
g1910/webdataset | [
"7519b372777e3a0dac7be7f0ecfeb1b7535351e3"
] | [
"webdataset/dataset.py"
] | [
"#!/usr/bin/python\n#\n# Copyright (c) 2017-2019 NVIDIA CORPORATION. All rights reserved.\n# This file is part of the WebDataset library.\n# See the LICENSE file for licensing terms (BSD-style).\n#\n\n\n\"\"\"Train PyTorch models directly from POSIX tar archive, locally\nor over HTTP connections.\n\"\"\"\n\n__all__ = \"\"\"Dataset tariterator default_handlers imagehandler\nreraise_exception ignore_and_continue warn_and_continue ignore_and_stop warn_and_stop\n\"\"\".split()\n\nimport os\nimport random\nimport warnings\nimport itertools as itt\n\nimport braceexpand\nfrom torch.utils.data import IterableDataset, DataLoader\n\nfrom . import tariterators\nfrom . import iterators\nfrom . import autodecode\nfrom . import shardcache\nfrom . import dbcache\nfrom . import utils\nfrom . import gopen\nfrom .utils import reraise_exception, lookup_sym, safe_eval\n\n\ndefault_cache_dir = os.path.expanduser(os.environ.get(\"WEBDATASET_CACHE\", \"\"))\ndefault_cache_name = lookup_sym(\n os.environ.get(\"WEBDATASET_CACHE_NAME\", \"shard_uuid\"), \".shardcache\".split()\n)\ndefault_cache_verbose = int(safe_eval(os.environ.get(\"WEBDATASET_CACHE_VERBOSE\", \"1\")))\ndefault_cache_size = int(\n float(safe_eval(os.environ.get(\"WEBDATASET_CACHE_SIZE\", \"1e15\")))\n)\n\n\nclass Composable:\n def __init__(self):\n super().__init__()\n\n def source_(self, source):\n self.source = source\n return self\n\n def then(self, f, *args, length=True, **kw):\n \"\"\"Compose this processor with a new processor defined by a function.\n\n The function is of the form:\n\n def my_process(source, ...):\n for sample in source:\n ...\n result = ...\n yield result\n \"\"\"\n assert callable(f)\n assert \"source\" not in kw\n # print(\"Processor\", args, kw)\n return Processor(self, f, *args, **kw)\n\n def compose(self, constructor, *args, **kw):\n \"\"\"Compose this processor with another IterableDataset.\n\n The constructor should be of the form `__init__(self, source_dataset, ...)`\n \"\"\"\n assert callable(constructor)\n return constructor(*args, **kw).source_(self)\n\n\nclass SplitByNode:\n\n def __init__(self, group=None):\n self.rank = -1\n self.size = -1\n try:\n import torch\n if not torch.distributed.is_available() or not torch.distributed.is_initialized():\n return\n except Exception as e:\n print(e)\n return\n if group is None:\n # group = torch.distributed.group.WORLD\n try:\n # some versions of torch don't like group=None\n import torch.distributed.distributed_c10d\n group = torch.distributed.distributed_c10d._default_pg\n except:\n pass\n self.rank = torch.distributed.get_rank(group=group)\n self.size = torch.distributed.get_world_size(group=group)\n\n def __call__(self, urls):\n urls = [url for url in urls]\n assert isinstance(urls, list)\n if self.size > 1:\n import socket\n gopen.info[\"rank\"] = self.rank\n gopen.info[\"size\"] = self.size\n gopen.info[\"host\"] = socket.gethostname()\n gopen.info[\"pid\"] = os.getpid()\n if self.rank == 0 and len(urls) < self.size:\n warnings.warn(f\"world_size {self.size} > num_shards {len(urls)}\")\n return urls[self.rank::self.size]\n else:\n return urls\n\n\ndef split_by_worker(urls):\n \"\"\"Selects a subset of urls based on Torch get_worker_info.\n\n Used as a shard selection function in Dataset.\"\"\"\n import torch\n\n urls = [url for url in urls]\n\n assert isinstance(urls, list)\n\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n wid = worker_info.id\n num_workers = worker_info.num_workers\n gopen.info[\"worker_id\"] = wid\n 
gopen.info[\"num_workers\"] = num_workers\n if wid == 0 and len(urls) < num_workers:\n warnings.warn(f\"num_workers {num_workers} > num_shards {len(urls)}\")\n return urls[wid::num_workers]\n else:\n return urls\n\n\nclass ShardList(IterableDataset, Composable):\n def __init__(\n self,\n urls,\n shuffle=False,\n nodesplitter=True,\n splitter=split_by_worker,\n length=None,\n ):\n super().__init__()\n self.shuffle = shuffle\n self.length = length\n if nodesplitter is True:\n nodesplitter = SplitByNode()\n self.nodesplitter = nodesplitter\n self.splitter = splitter\n if isinstance(urls, str):\n urls = list(braceexpand.braceexpand(urls))\n else:\n urls = list(urls)\n self.urls = urls\n assert isinstance(self.urls[0], str)\n\n def __iter__(self):\n urls = list(self.urls)\n if self.nodesplitter is not None:\n urls = list(self.nodesplitter(urls))\n if self.splitter is not None:\n urls = list(self.splitter(urls))\n if callable(self.shuffle):\n self.shuffle(urls)\n elif self.shuffle:\n random.shuffle(urls)\n for url in urls:\n yield dict(url=url)\n\n def __len__(self):\n if self.length is None:\n raise ValueError(\n \"length requested, but no length specified for ShardIterator\"\n )\n return self.length\n\n\nclass Shorthands:\n def __init__(self):\n super().__init__()\n\n def batched(self, batchsize, collation_fn=iterators.default_collation_fn, partial=True):\n return self.then(iterators.batched, batchsize=batchsize, collation_fn=collation_fn, partial=partial)\n\n def unbatched(self):\n return self.then(iterators.unbatched)\n\n def shuffle(self, size, **kw):\n if size < 1:\n return self\n return self.then(iterators.shuffle, size, **kw)\n\n def map(self, f, handler=reraise_exception):\n return self.then(iterators.map, f, handler=handler)\n\n def decode(\n self,\n *args,\n pre=None,\n post=None,\n handler=reraise_exception,\n ):\n # for backwards compatibility\n handlers = [\n autodecode.ImageHandler(h) if isinstance(h, str) else h for h in args\n ]\n decoder = autodecode.Decoder(handlers, pre=pre, post=post)\n return self.map(decoder, handler=handler)\n\n def rename(self, handler=reraise_exception, **kw):\n return self.then(iterators.rename, handler=handler, _kwa=kw)\n\n def map_dict(self, handler=reraise_exception, **kw):\n return self.then(iterators.map_dict, handler=handler, _kwa=kw)\n\n def select(self, predicate, **kw):\n return self.then(iterators.select, predicate, _kwa=kw)\n\n def to_tuple(self, *args, handler=reraise_exception):\n return self.then(iterators.to_tuple, *args, handler=handler)\n\n def map_tuple(self, *args, handler=reraise_exception):\n return self.then(iterators.map_tuple, *args, handler=handler)\n\n def pipe(self, f, *args, **kw):\n return self.then(f, *args, _kwa=kw)\n\n def dbcache(self, fname, size):\n return self.compose(dbcache.DBCache, fname, size)\n\n def slice(self, *args):\n return self.pipe(itt.islice, *args)\n\n def repeat(\n self,\n nepochs=None,\n nbatches=None,\n nsamples=None,\n batchsize=utils.guess_batchsize,\n ):\n return self.compose(\n Repeatedly,\n nepochs=nepochs,\n nbatches=nbatches,\n nsamples=nsamples,\n batchsize=batchsize,\n )\n\n\nclass Repeatedly(IterableDataset, Composable, Shorthands):\n def __init__(self, **kw):\n self.kw = kw\n\n def __iter__(self):\n return utils.repeatedly(self.source, **self.kw)\n\n def __len__(self):\n return len(self.source)\n\n\nclass Processor(IterableDataset, Composable, Shorthands):\n def __init__(self, source, f, *args, _kwa={}, length=True, **kw):\n super().__init__()\n assert callable(f)\n self.source = source\n 
self.f = f\n self.args = args\n self.kw = dict(_kwa)\n self.kw.update(kw)\n self.length = length\n\n def source_(self, source):\n self.source = source\n return self\n\n def __iter__(self):\n assert (\n self.source is not None\n ), f\"must set source before calling iter {self.f} {self.args} {self.kw}\"\n assert callable(self.f), self.f\n return self.f(iter(self.source), *self.args, **self.kw)\n\n def __len__(self):\n if self.length is True:\n return len(self.source)\n elif isinstance(self.length, int):\n return self.length\n elif callable(self.length):\n return self.length(self.source)\n else:\n raise ValueError(f\"{self.length}: not a valid length specification\")\n\n\ndef WebDataset(\n urls,\n shardshuffle=True,\n cache_dir=default_cache_dir,\n cache_size=default_cache_size,\n cache_name=default_cache_name,\n cache_verbose=default_cache_verbose,\n splitter=split_by_worker,\n nodesplitter=True,\n handler=reraise_exception,\n length=None,\n):\n result = ShardList(\n urls,\n shuffle=shardshuffle,\n splitter=splitter,\n nodesplitter=nodesplitter,\n length=length,\n )\n result = result.then(tariterators.url_opener, handler=handler)\n if cache_dir != \"\":\n result = result.then(\n shardcache.cache_shards,\n cache_dir=cache_dir,\n cache_size=cache_size,\n cache_name=cache_name,\n verbose=cache_verbose,\n )\n result = result.then(tariterators.tar_file_expander, length=None, handler=handler)\n result = result.then(tariterators.group_by_keys, length=length)\n return result\n\n\ndef WebLoader(*args, **kw):\n return Processor(DataLoader(*args, **kw), utils.identity)\n\n\nclass ResizedDataset(IterableDataset):\n \"\"\"Change the actual and nominal length of an IterableDataset.\n\n :param dataset: IterableDataset\n :param length: declared length of the dataset\n :param nominal: nominal length of dataset (if different from declared)\n\n This will continuously iterate through the original dataset, but\n impose new epoch boundaries at the given length/nominal.\n This exists mainly as a workaround for the odd logic in DataLoader.\n It is also useful for choosing smaller nominal epoch sizes with\n very large datasets.\n\n \"\"\"\n\n def __init__(self, dataset, length=None, nominal=None):\n super().__init__()\n self.dataset = dataset\n if length is None:\n length = len(dataset)\n self.length = length\n self.nominal = self.length if nominal is None else nominal\n self.source = None\n\n def __len__(self):\n return self.nominal\n\n def __getstate__(self):\n result = dict(self.__dict__)\n result[\"source\"] = None\n return result\n\n def __iter__(self):\n if self.source is None:\n self.source = iter(self.dataset)\n for i in range(self.length):\n try:\n sample = next(self.source)\n except StopIteration:\n self.source = iter(self.dataset)\n sample = next(self.source)\n yield sample\n\n\nChoppedDataset = ResizedDataset\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.utils.data.get_worker_info",
"torch.distributed.is_initialized",
"torch.distributed.is_available",
"torch.distributed.get_rank",
"torch.distributed.get_world_size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
china-aqi/rim | [
"0e1de286c8debec952ed57e2ce1e1d75ec09745e"
] | [
"src/aqi_db.py"
] | [
"from functools import lru_cache\nimport datetime\nfrom typing import Tuple, List, Callable, NamedTuple\nfrom collections import namedtuple\n\nimport sqlalchemy\nimport pandas as pd\n\n\ndef get_securities():\n return pd.read_sql('securities', con=sqlalchemy.create_engine('sqlite:///../data/jq.db'))\n\n\n@lru_cache(maxsize=1)\ndef get_profit_forecast(today: str):\n assert today is not None # 这个参数是为了cache需要,只要在一个日子中,就不需要重复从数据库拿数据\n df = pd.read_sql('profit_forecast', con=sqlalchemy.create_engine('sqlite:///../data/em1.db'))\\\n .set_index('code')\\\n .drop('index', axis=1)\n return df[['eps_2019', 'eps_2020', 'eps_2021']].apply(pd.to_numeric, errors='coerce', downcast='float')\n\n\n@lru_cache(maxsize=1)\ndef get_indicator(year: str = '2018'):\n assert year == '2018'\n indicator: pd.DataFrame = pd.read_sql('indicator2018', con=sqlalchemy.create_engine('sqlite:///../data/ts.db'))\n indicator['ts_code'] = indicator['ts_code'].map(lambda x: x[:6])\n return indicator.set_index('ts_code')\\\n .drop('index', axis=1)\n\n\ndef _today() -> str:\n return datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n\n@lru_cache(maxsize=1)\ndef get_financial_indicator(today: str = _today()) -> pd.DataFrame:\n \"\"\"\n get the tushare financial indicator from ts.db\n That is a table with ts_code, end_date and grossprofit_margin column\n\n Parameters\n ----------\n today : str\n today这个参数是为了cache需要,只要在一个日子中,就不需要重复从数据库拿数据\n\n Returns\n -------\n table : DataFrame\n 剔除了毛利率异常的数据(grossprofit_margin<=0 or grossprofit_margin>=100)\n 按ts_code、end_date字典序排序\n \"\"\"\n return pd.read_sql('SELECT ts_code, end_date, grossprofit_margin FROM financial_indicator \\\n WHERE 0 <= grossprofit_margin and grossprofit_margin <= 100\\\n ORDER BY ts_code, end_date',\n con=sqlalchemy.create_engine('sqlite:///../../data/ts.db'))\\\n .set_index(['ts_code', 'end_date'])\n\n\n@lru_cache(maxsize=1)\ndef get_ts_statement(name: str, today: str = _today()) -> pd.DataFrame:\n \"\"\"\n get the statement from ts.db\n\n Parameters\n ----------\n name: str\n statement name, for example, 'balancesheet'\n today : str\n today这个参数是为了cache需要,只要在一个日子中,就不需要重复从数据库拿数据\n\n Returns\n -------\n table : DataFrame\n \"\"\"\n return pd.read_sql(f'SELECT * FROM {name}',\n con=sqlalchemy.create_engine('sqlite:///../../data/ts.db'))\\\n .set_index(['ts_code', 'end_date'])\n\n\ndef get_financial_indicator_by_code(code: str) -> pd.DataFrame:\n \"\"\" 获取某个公司最近数年的财务指标\n 输入假设:\n code 符合tushare要求的上市公司代码\n 输出规定:\n 列名同tushare的财务指标表格,包含了code的所有数据\n \"\"\"\n return get_financial_indicator().loc[code]\n\n\ndef save_profitability_index_to_db(data: List[Tuple[str, float, int, float, int]]) -> None:\n \"\"\"\n 把各个公司代码、盈利增长指标、盈利增长百分位、盈利稳定指标、盈利稳定百分位等信息保存到数据库\n\n :param data: list of tuple\n 元组中第一项为公司代码,以后依次为ms, ms rank,mg,mg rank\n\n :return: None\n \"\"\"\n p = pd.DataFrame(data, columns=['ts_code', 'mg', 'mg_rank', 'ms', 'ms_rank']).set_index('ts_code')\n p.to_sql('profitability_index', con=sqlalchemy.create_engine('sqlite:///../../data/indicator.db'),\n if_exists='replace')\n\n\n@lru_cache(maxsize=1)\ndef read_profitability_index(today: str = _today()) -> pd.DataFrame:\n \"\"\"\n 从indicator数据库中读取盈利能力指标,包括了盈利增长指标和其全市场百分位,盈利稳定性指标和其全市场百分位\n\n :param today: 日期字符串\n 此参数主要是为了cache,一般盈利能力指标在日内是不会变化的\n :return: index为ts_code的DataFrame,有4各栏位mg, mg_rank, ms and ms_rank\n \"\"\"\n return pd.read_sql('profitability_index', con=sqlalchemy.create_engine('sqlite:///../../data/indicator.db'))\\\n .set_index('ts_code')\n\n\ndef get_sw_industry_roe() -> Callable[[str], Tuple[str, str, float]]:\n 
\"\"\" 读入截止2018年申万行业净资产收益率\n\n Precondition:\n ==============================================================================================\n project root directory/data/wind_sw_industry_roe.scv\n 此文件中的数据来自万得,包括了申万二级行业自2007年到2018年的平均净资产收益率, 内容大致如下:\n 代码,行业名称,2007年,2008年,2009年,2010年,2011年,2012年,2013年,2014年,2015年,2016年,2017年,2018年,mean\n 801011.SI,林业Ⅱ(申万),4.92,4.63,-4.29,3.7,-7.11,-0.22,4.8,1.77,1.03,1.71,0.66,1.76,1.11\n ...\n 801881.SI,其他交运设备Ⅱ(申万),,,,,,,,5.95,4.63,9.41,14.54,7.86,8.48\n\n Post condition:\n ===============================================================================================\n 查询过后,相关的全行业净资产收益率被保存在闭包中\n :return: 闭包函数\n 输入参数申万行业指数,输出是元组,第一项行业指数,第二项行业名称,第三项行业净资产收益率\n \"\"\"\n df = pd.read_csv('../data/wind_sw_industry_roe.csv', encoding='GBK')[['代码', '行业名称', 'mean']]\n df['代码'] = df['代码'].map(lambda x: x[:6])\n df = df.set_index('代码')\n return lambda industry_index: (industry_index,\n df.loc[industry_index]['行业名称'],\n df.loc[industry_index]['mean'] / 100)\n\n\ndef get_sw_industry() -> Callable[[str], str]:\n \"\"\" 获取上市公司的申万行业(二级)代码\n\n Precondition\n =====================================================================================================\n 经营异常公司 不可查询之 TO DO:\n\n Post condition\n ====================================================================================================\n :return: 闭包函数\n 输入参数是上市公司代码,输出是申万二级行业代码\n \"\"\"\n df = pd.read_sql_table('industries', con=sqlalchemy.create_engine('sqlite:///../data/jq.db'))\n df = df.set_index('code')\n return lambda code: df.loc[_to_jq_code(code)]['sw_l2']\n\n\ndef get_company_info() -> Callable[[str], NamedTuple]:\n \"\"\" 获取上市公司的基本信息\n\n Precondition\n =====================================================================================================\n ../data/jq.db 存在\n jq.db中存在'company_info'表,其内容同 https://www.joinquant.com/help/api/help?name=JQData#上市公司基本信息\n\n Post condition\n ====================================================================================================\n :return: 闭包函数\n 输入参数是上市公司代码,输出是上市公司信息\n \"\"\"\n df = pd.read_sql_table('company_info', con=sqlalchemy.create_engine('sqlite:///../data/jq.db'))\n df = df.set_index('code')[['website', 'province', 'city', 'industry_1', 'industry_2', 'main_business']]\n return lambda code: namedtuple('CompanyInfo', df.loc[_to_jq_code(code)].index)(*df.loc[_to_jq_code(code)])\n\n\ndef get_market_value() -> Callable[[str], NamedTuple]:\n \"\"\" 获取上市公司的最近交易日的市值和相关信息\n\n Precondition\n =====================================================================================================\n ../data/jq.db 存在\n jq.db中存在'market_value'表,其内容同 https://www.joinquant.com/help/api/help?name=JQData#市值数据(每日更新)\n\n Post condition\n ====================================================================================================\n :return: 闭包函数\n 输入参数是上市公司代码,输出是上市公司最近交易日的市值和相关信息\n \"\"\"\n df = pd.read_sql_table('market_value', con=sqlalchemy.create_engine('sqlite:///../data/jq.db'))\n df = df.set_index('code')[['market_cap', 'pe_ratio', 'pb_ratio', 'ps_ratio', 'pcf_ratio']]\n return lambda code: namedtuple('market_value', df.loc[_to_jq_code(code)].index)(*df.loc[_to_jq_code(code)])\n\n\ndef _to_jq_code(code: str) -> str:\n \"\"\" 上市公司代码转换为jq风格\n \"\"\"\n return f\"{code}.XSHG\" if code[0] == '6' else f\"{code}.XSHE\"\n\n\nif __name__ == \"__main__\":\n fn = get_market_value()\n print(fn('000625'))\n\n\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
TexasInstruments/tvm | [
"c78ea878a05e262a30c3ffa250c1479a695ecf33"
] | [
"tests/python/frontend/tflite/test_forward.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"\nTFLite testcases\n================\nThis article is a test script to test TFLite operator with Relay.\n\"\"\"\nfrom __future__ import print_function\nfrom functools import partial\nimport pytest\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import relay\ntry:\n import tensorflow.compat.v1 as tf\nexcept ImportError:\n import tensorflow as tf\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import variables\ntry:\n from tensorflow import lite as interpreter_wrapper\nexcept ImportError:\n from tensorflow.contrib import lite as interpreter_wrapper\n\nfrom tvm.contrib.download import download_testdata\nimport tvm.relay.testing.tf as tf_testing\nfrom packaging import version as package_version\n\nfrom PIL import Image\nimport os\n\n#######################################################################\n# Generic run functions for TVM & TFLite\n# --------------------------------------\ndef convert_to_list(x):\n if not isinstance(x, list):\n x = [x]\n return x\n\n\n#######################################################################\n# Get a real image for e2e testing\n# --------------------------------\ndef get_real_image(im_height, im_width):\n repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/'\n img_name = 'elephant-299.jpg'\n image_url = os.path.join(repo_base, img_name)\n img_path = download_testdata(image_url, img_name, module='data')\n image = Image.open(img_path).resize((im_height, im_width))\n x = np.array(image).astype('uint8')\n data = np.reshape(x, (1, im_height, im_width, 3))\n return data\n\ndef get_real_image_object_detection(im_height, im_width):\n repo_base = 'https://github.com/dmlc/web-data/raw/master/gluoncv/detection/'\n img_name = 'street_small.jpg'\n image_url = os.path.join(repo_base, img_name)\n img_path = download_testdata(image_url, img_name, module='data')\n image = Image.open(img_path).resize((im_height, im_width))\n x = np.array(image).astype('uint8')\n data = np.reshape(x, (1, im_height, im_width, 3))\n return data\n\ndef vmobj_to_list(o):\n if isinstance(o, tvm.nd.NDArray):\n return [o.asnumpy().tolist()]\n elif isinstance(o, tvm.runtime.container.ADT):\n result = []\n for f in o:\n result.extend(vmobj_to_list(f))\n return result\n elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):\n if o.constructor.name_hint == 'Cons':\n tl = 
vmobj_to_list(o.fields[1])\n hd = vmobj_to_list(o.fields[0])\n hd.extend(tl)\n return hd\n elif o.constructor.name_hint == 'Nil':\n return []\n elif 'tensor_nil' in o.constructor.name_hint:\n return [0]\n elif 'tensor' in o.constructor.name_hint:\n return [o.fields[0].asnumpy()]\n else:\n raise RuntimeError(\"Unknown object type: %s\" %\n o.constructor.name_hint)\n else:\n raise RuntimeError(\"Unknown object type: %s\" % type(o))\n\ndef run_tvm_graph(tflite_model_buf, input_data, input_node, num_output=1, target='llvm',\n out_names=None, mode='graph_runtime'):\n \"\"\" Generic function to compile on relay and execute on tvm \"\"\"\n # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1\n try:\n import tflite.Model\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n input_data = convert_to_list(input_data)\n input_node = convert_to_list(input_node)\n\n shape_dict = {}\n dtype_dict = {}\n for i, e in enumerate(input_node):\n shape_dict[e] = input_data[i].shape\n dtype_dict[e] = input_data[i].dtype.name\n\n mod, params = relay.frontend.from_tflite(tflite_model,\n shape_dict=shape_dict,\n dtype_dict=dtype_dict)\n\n if mode in ['debug', 'vm']:\n ex = relay.create_executor(mode, mod=mod, ctx=tvm.cpu(), target=\"llvm\")\n inputs = []\n for param in mod['main'].params:\n found = False\n for i, n in enumerate(input_node):\n if n == param.name_hint:\n found = True\n inputs.append(tvm.nd.array(input_data[i]))\n break\n # Interpreter doesn't bind constants, so still need to find in params\n if not found:\n inputs.append(tvm.nd.array(params[param.name_hint]))\n result = ex.evaluate()(*inputs)\n return vmobj_to_list(result)\n else:\n with tvm.transform.PassContext(opt_level=3):\n graph, lib, params = relay.build(mod, target, params=params)\n\n ctx = tvm.context(target, 0)\n from tvm.contrib import graph_runtime\n m = graph_runtime.create(graph, lib, ctx)\n # set inputs\n for i, e in enumerate(input_node):\n m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))\n\n m.set_input(**params)\n # execute\n m.run()\n # get outputs\n assert out_names is None or num_output == len(out_names), \"out_names: {} num_output: {}\".format(\n out_names, num_output)\n tvm_output_list = []\n for i in range(0, num_output):\n tvm_output = m.get_output(i)\n tvm_output_list.append(tvm_output.asnumpy())\n return tvm_output_list\n\n\ndef run_tflite_graph(tflite_model_buf, input_data):\n \"\"\" Generic function to execute TFLite \"\"\"\n input_data = convert_to_list(input_data)\n\n interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # set input\n assert len(input_data) == len(input_details)\n for i in range(len(input_details)):\n interpreter.set_tensor(input_details[i]['index'], input_data[i])\n\n # Run\n interpreter.invoke()\n\n # get output\n tflite_output = list()\n for i in range(len(output_details)):\n tflite_output.append(interpreter.get_tensor(output_details[i]['index']))\n\n return tflite_output\n\n\ndef compare_tflite_with_tvm(in_data, in_name, input_tensors,\n output_tensors, init_global_variables=False,\n out_names=None, quantized=False, input_range=None, mode='graph_runtime'):\n \"\"\"Generic function to generate 
and compare TFLite and TVM output\"\"\"\n in_data = convert_to_list(in_data)\n in_name = convert_to_list(in_name)\n out_names = convert_to_list(out_names)\n in_node = [0] * len(in_name)\n for i in range(len(in_name)):\n in_node[i] = in_name[i].split(':')[0] if \":\" in in_name[i] else in_name[i]\n\n with tf.Session() as sess:\n if init_global_variables:\n sess.run(variables.global_variables_initializer())\n # convert to tflite model\n converter = tf.lite.TFLiteConverter.from_session(\n sess, input_tensors, output_tensors)\n\n if quantized:\n converter.inference_type = tf.lite.constants.QUANTIZED_UINT8\n input_arrays = converter.get_input_arrays()\n input_stats = {}\n # calculate the mean and quantization scale for every input tensor,\n # with respect to its fp32 input range, defined in fake_quant.\n # s = 255/(fmax-fmin); m = -fmin*s (the zero point)\n for i in input_arrays:\n try:\n quant_scale = 255 / (input_range[i][1] - input_range[i][0])\n except ZeroDivisionError:\n raise ZeroDivisionError('Min and max of the input range for tensor ' + i + ' can\\'t be equal')\n mean = - input_range[i][0] * quant_scale\n input_stats[i] = (mean, quant_scale)\n converter.quantized_input_stats = input_stats\n\n tflite_model_buffer = converter.convert()\n tflite_output = run_tflite_graph(tflite_model_buffer, in_data)\n\n for device in [\"llvm\"]:\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n continue\n\n tvm_output = run_tvm_graph(tflite_model_buffer, in_data, in_node, target=device,\n num_output=len(out_names), out_names=out_names, mode=mode)\n\n # WARNING: the results could well be random values clipped to 0 or 255 because of badly tuned output\n # range for the specific operator. While adding test ensure that we aren't getting only clipped values\n # in output tensors that still pass the assertion. 
For reference see _test_elemwise_qnn_out_range()\n if quantized:\n for i in range(len(tflite_output)):\n # allow absolute tolerance of 1 in the quantized results\n tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1, rtol=1e-5)\n else:\n for i in range(len(tflite_output)):\n tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1e-5, rtol=1e-5)\n\n\ndef with_fused_activation_function(input_tensor, fn_name):\n if fn_name is None or fn_name == \"NONE\":\n return input_tensor\n if fn_name == \"RELU\":\n return nn_ops.relu(input_tensor)\n if fn_name == \"RELU6\":\n return nn_ops.relu6(input_tensor)\n if fn_name == \"RELU_N1_TO_1\":\n return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))\n if fn_name == \"TANH\":\n return math_ops.tanh(input_tensor)\n raise AssertionError(\"Unknown fused_activation_function {}\".format(fn_name))\n\n\ndef _test_split(in_shape, axis, num_splits, dtype):\n \"\"\"internal split tester taking as parameters in_shape, number of tensors to split into\n and dtype (data type)\"\"\"\n\n np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=in_shape, dtype=dtype, name=\"in_data\")\n out = array_ops.split(in_data, num_splits, axis=axis)\n num_splits = len(num_splits) if isinstance(num_splits, list) \\\n else num_splits\n out_names = ['out_' + str(n) + ':0' for n in range(num_splits)]\n compare_tflite_with_tvm([np_data], ['in_data'], [in_data], out,\n out_names=out_names)\n\ndef test_forward_split():\n '''test split layer'''\n # rank 1\n _test_split((3,), 0, 1, 'float32')\n _test_split((3,), 0, 3, 'float32')\n _test_split((6,), 0, 3, 'float32')\n # rank 2\n _test_split((6, 2), 0, 3, 'float32')\n _test_split((2, 6), 1, 6, 'float32')\n # rank 3\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_split((6, 2, 4), 0, 2, 'int32')\n\n _test_split((2, 6, 4), 1, 3, 'float32')\n _test_split((2, 4, 6), 2, 1, 'float32')\n # rank 4\n _test_split((6, 1, 3, 5), 0, 3, 'float32')\n _test_split((1, 6, 3, 5), 1, 3, 'float32')\n _test_split((1, 3, 6, 5), 2, 3, 'float32')\n _test_split((1, 3, 5, 6), 3, 3, 'float32')\n # split along negative axis\n _test_split((6, 1, 3, 5), -4, 3, 'float32')\n _test_split((1, 6, 3, 5), -3, 3, 'float32')\n _test_split((1, 3, 6, 5), -2, 3, 'float32')\n _test_split((1, 3, 5, 6), -1, 3, 'float32')\n # size_splits split\n _test_split((6,), 0, [1, 2, 3], 'float32')\n _test_split((3, 6, 4), -2, [1, 4, 1], 'float32')\n\n#######################################################################\n# slice\n# -----\n\ndef _test_slice(data, begin, size):\n \"\"\" One iteration of SLICE \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = array_ops.slice(in_data, begin, size)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_slice():\n \"\"\" SLICE \"\"\"\n _test_slice(np.arange(4, dtype=np.float32).reshape((4, )), begin=[0], size=[2])\n _test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3])\n # tflite 1.13 outputs nonsense values if size[i] == -1\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1])\n _test_slice(np.arange(5, dtype=np.int32).reshape((5, )), begin=[4], size=[-1])\n\n#######################################################################\n# Topk\n# ----\ndef 
_test_topk(in_shape, k=1):\n \"\"\" One iteration of TOPK \"\"\"\n data = np.random.uniform(size=in_shape).astype('float32')\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = nn_ops.top_k(in_data, k, name='TopK')\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out[0]])\n\ndef test_forward_topk():\n \"\"\" TOPK \"\"\"\n _test_topk((3,), 1)\n _test_topk((3,), 3)\n _test_topk((3, 5, 7), 3)\n _test_topk((3, 5, 7), 3)\n\n#######################################################################\n# Gather\n# ------\n\ndef _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False):\n \"\"\" One iteration of Gather \"\"\"\n indices = np.asarray(indices).astype('int32')\n data = np.random.uniform(1, 10, size=dshape)\n data = data.astype(np.uint8) if quantized else data.astype(dtype)\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name=\"in_data\")\n if axis:\n out = array_ops.gather(in_data, indices, axis=axis)\n else:\n out = array_ops.gather(in_data, indices) #tflite conversion fails for None axis\n input_range = {'in_data': (-100, 100)} if quantized else None\n try:\n compare_tflite_with_tvm([data], ['in_data:0'], [in_data], [out],\n quantized=quantized, input_range=input_range)\n except ValueError as e:\n if not oob:\n raise e\n except Exception as e:\n raise e\n\ndef test_forward_gather():\n \"\"\" GATHER \"\"\"\n for quantized in [False, True]:\n _test_gather((4,), [1], 0, 'float32', quantized)\n _test_gather((4,), [1], None, 'int32', quantized)\n _test_gather((1, 4), [0], 0, 'int32', quantized)\n _test_gather((4,), [[[1, 0], [0, 1]]], 0, 'float32', quantized)\n _test_gather((2, 2), [[[1, 0], [0, 1]]], 1, 'int32', quantized)\n _test_gather((2, 2), [[[1, 0], [0, 1]]], None, 'float32', quantized)\n _test_gather((3, 3, 3), [[[1, 0]]], 0, 'int32', quantized)\n _test_gather((3, 3, 3), [[[1, 0]]], 2, 'int32', quantized)\n _test_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, 'float32', quantized)\n _test_gather((3, 3, 3), [[[2, 1]]], -1, 'int32', quantized)\n _test_gather((4,), [16], 0, 'float32', quantized, oob=True)\n _test_gather((1, 3, 3), [12], 0, 'int32', quantized, oob=True)\n _test_gather((1, 3, 3), [20], 1, 'float32', quantized, oob=True)\n _test_gather((1, 3, 3), [20, 20], 2, 'float32', quantized, oob=True)\n\n#######################################################################\n# Gather_ND\n# ---------\n\ndef _test_gather_nd(data, indices):\n \"\"\" One iteration of GATHER_ND \"\"\"\n with tf.Graph().as_default():\n in_data = tf.placeholder(shape=data.shape, dtype=data.dtype, name=\"data\")\n indices_data = tf.placeholder(shape=indices.shape, dtype=indices.dtype,\n name=\"indices\")\n out = tf.gather_nd(in_data, indices_data)\n\n compare_tflite_with_tvm([data, indices], ['data:0', 'indices:0'],\n [in_data, indices_data], [out])\n\ndef test_forward_gather_nd():\n \"\"\" GATHER_ND \"\"\"\n _test_gather_nd(\n np.array([[[1.2, 2.0], [3.1, 4.1]], [[5.1, 6.1], [7.1, 8.1]]]).astype('float32'),\n np.asarray([[0, 1], [1, 0]]).astype('int32')\n )\n _test_gather_nd(\n np.reshape(np.arange(30), [5, 6]).astype('int32'),\n np.asarray([[1, 2]]).astype('int32')\n )\n _test_gather_nd(\n np.reshape(np.arange(12), [2, 3, 2]).astype('int32'),\n np.asarray([[[0, 0], [0, 1]], [[1, 0], [1, 1]]]).astype('int32')\n )\n _test_gather_nd(\n np.reshape(np.arange(4), [4]).astype('float32'),\n np.asarray([1]).astype('int32')\n )\n _test_gather_nd(\n np.reshape(np.arange(4), [1, 
4]).astype('float32'),\n np.asarray([0]).astype('int32')\n )\n _test_gather_nd(\n np.reshape(np.arange(4), [1, 4]).astype('float32'),\n np.asarray([0, 3]).astype('int32')\n )\n\n#######################################################################\n# StridedSlice\n# ------------\n\ndef _test_stridedslice(ip_shape, begin, end, stride, dtype,\n begin_mask=0, end_mask=0, new_axis_mask=0,\n shrink_axis_mask=0, ellipsis_mask=0, quantized=False):\n \"\"\" One iteration of a Stridedslice \"\"\"\n data = np.random.uniform(size=ip_shape).astype(dtype)\n data = data.astype(np.uint8) if quantized else data.astype(dtype)\n with tf.Graph().as_default():\n in_data = tf.placeholder(dtype, ip_shape, name=\"in_data\")\n out = array_ops.strided_slice(in_data, begin, end, stride,\n begin_mask=begin_mask,\n end_mask=end_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask,\n ellipsis_mask=ellipsis_mask)\n input_range = {'in_data': (-100, 100)} if quantized else None\n compare_tflite_with_tvm([data], ['in_data:0'], [in_data], [out], quantized=quantized,\n input_range=input_range)\n\ndef test_forward_stridedslice():\n '''test StridedSlice'''\n for quantized in [False, True]:\n _test_stridedslice((2), [1], [1], [1], 'float32', shrink_axis_mask=1, quantized=quantized)\n _test_stridedslice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], 'float32', quantized=quantized)\n _test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], 'float32', shrink_axis_mask=0, quantized=quantized)\n _test_stridedslice((4, 4), [1, 0], [4, 4], [1, 1], 'float32', shrink_axis_mask=2, quantized=quantized)\n\n#######################################################################\n# transpose\n# ---------\n\n\ndef _test_forward_transpose(ishape, axes=()):\n data = np.random.uniform(size=ishape).astype(np.float32)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n if not axes:\n out = array_ops.transpose(in_data)\n else:\n out = array_ops.transpose(in_data, axes)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_transpose():\n _test_forward_transpose((2, 2))\n _test_forward_transpose((2, 3, 4))\n _test_forward_transpose((7, 8, 8, 10))\n _test_forward_transpose((2, 3, 4), (1, 2, 0))\n _test_forward_transpose((2, 3, 4), (0, 1, 2))\n _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))\n _test_forward_transpose((2, 3, 4, 5), ())\n\n#######################################################################\n# Cast\n# ----\n\ndef _test_cast(data, cast_dtype):\n \"\"\" One iteration of CAST \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = math_ops.cast(in_data, cast_dtype)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_cast():\n \"\"\" CAST \"\"\"\n _test_cast(np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32)\n _test_cast(np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.uint8)\n _test_cast(np.arange(6.0, dtype=np.int32).reshape((1, 6)), cast_dtype=tf.int64)\n\n#######################################################################\n# Batch Mat Mul\n# ----\ndef _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):\n with tf.Graph().as_default():\n A = array_ops.placeholder(shape=A_shape, dtype=dtype, name='A')\n B = array_ops.placeholder(shape=B_shape, dtype=dtype, name='B')\n result = math_ops.matmul(A, B, adjoint_a=adjoint_a,\n adjoint_b=adjoint_b, name='batchmatmul')\n\n 
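# Editorial note: adjoint_a/adjoint_b make math_ops.matmul transpose (and\n # conjugate) the corresponding operand before multiplying, which is why a\n # (3, 5, 4) x (3, 5, 4) pair with an adjoint flag set is a valid contraction.\n 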
A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)\n B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)\n compare_tflite_with_tvm([A_np, B_np], [A.name, B.name], [A, B], [result])\n\n\ndef test_forward_batch_matmul():\n \"\"\" BATCH_MAT_MUL \"\"\"\n _test_batch_matmul((3, 5, 4), (3, 4, 5), 'float32')\n _test_batch_matmul((3, 5, 4), (3, 4, 5), 'float32', True, True)\n _test_batch_matmul((3, 5, 4), (3, 5, 4), 'float32', True, False)\n _test_batch_matmul((3, 5, 4), (3, 5, 4), 'float32', False, True)\n _test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), 'float32')\n\n#######################################################################\n# Tile\n# ----\n\n\ndef _test_forward_tile(in_shape, reps, dtype):\n data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n out = array_ops.tile(in_data, reps)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_tile():\n _test_forward_tile((2, ), (3, ), \"int32\")\n _test_forward_tile((2, 2), (2, 3), \"float32\")\n\n######################################################################\n# BatchToSpaceND\n# --------------\n\n\ndef _test_batch_to_space_nd(input_shape, block_shape, crops, dtype='int32'):\n data = np.random.uniform(0, 5, size=input_shape).astype(dtype)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)\n\n out = array_ops.batch_to_space_nd(in_data, block_shape, crops)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_batch_to_space_nd():\n # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d\n _test_batch_to_space_nd(\n input_shape=[4, 1, 1, 1],\n block_shape=[2, 2],\n crops=[[0, 0], [0, 0]]\n )\n\n _test_batch_to_space_nd(\n input_shape=[4, 1, 1, 3],\n block_shape=[2, 2],\n crops=[[0, 0], [0, 0]]\n )\n\n _test_batch_to_space_nd(\n input_shape=[4, 2, 2, 1],\n block_shape=[2, 2],\n crops=[[0, 0], [0, 0]]\n )\n\n######################################################################\n# SpaceToBatchND\n# --------------\n\n\ndef _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype='int32'):\n data = np.random.uniform(0, 5, size=input_shape).astype(dtype)\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)\n\n out = array_ops.space_to_batch_nd(in_data, block_shape, paddings)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_space_to_batch_nd():\n # test cases: https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd\n _test_space_to_batch_nd(\n input_shape=[1, 2, 2, 1],\n block_shape=[2, 2],\n paddings=[[0, 0], [0, 0]]\n )\n\n _test_space_to_batch_nd(\n input_shape=[1, 2, 2, 3],\n block_shape=[2, 2],\n paddings=[[0, 0], [0, 0]]\n )\n\n _test_space_to_batch_nd(\n input_shape=[1, 4, 4, 1],\n block_shape=[2, 2],\n paddings=[[0, 0], [0, 0]]\n )\n\n _test_space_to_batch_nd(\n input_shape=[2, 2, 4, 1],\n block_shape=[2, 2],\n paddings=[[0, 0], [2, 0]]\n )\n\n#######################################################################\n# Pooling\n# -------\ndef _test_pooling_iteration(input_shape, **kwargs):\n \"\"\" One iteration of pool operation with given shapes and attributes \"\"\"\n\n x = -np.arange(\n np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1\n\n with tf.Graph().as_default():\n in_data = 
array_ops.placeholder(shape=input_shape, dtype='float32')\n out = nn_ops.pool(in_data, **kwargs)\n\n compare_tflite_with_tvm(x,'Placeholder:0', [in_data], [out])\n\n\ndef _test_pooling(input_shape, **kwargs):\n _test_pooling_iteration(input_shape, **kwargs)\n\n\ndef test_forward_pooling():\n \"\"\" Pooling \"\"\"\n\n for pool_type in ['AVG', 'MAX']:\n _test_pooling(input_shape=[2, 9, 10, 2],\n window_shape=[1, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 10, 9, 2],\n window_shape=[1, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 9, 10, 2],\n window_shape=[2, 1],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[1, 1])\n\n _test_pooling(input_shape=[2, 10, 9, 2],\n window_shape=[2, 3],\n padding='SAME',\n pooling_type=pool_type,\n dilation_rate=[1, 1],\n strides=[2, 1])\n\n\ndef _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):\n x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1\n\n with tf.Graph().as_default():\n in_data = tf.placeholder(\n dtype=tf.float32, name=\"input\", shape=input_shape)\n out = tf.sqrt(tf.nn.avg_pool(\n tf.square(in_data), ksize=ksize, strides=strides,\n padding=padding, data_format=data_format))\n out = with_fused_activation_function(out, fused_func_name)\n\n compare_tflite_with_tvm(x, 'input', [in_data], [out])\n\n\ndef test_forward_l2_pool2d():\n _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', \"NHWC\", \"RELU6\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', \"NHWC\", \"RELU6\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], 'SAME', \"NHWC\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], 'SAME', \"NHWC\")\n _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', \"NHWC\", \"RELU\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', \"NHWC\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], 'VALID', \"NHWC\")\n _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], 'VALID', \"NHWC\", \"RELU6\")\n\n\n#######################################################################\n# Convolution\n# -----------\n\ndef _test_convolution(tensor_in_sizes, filter_in_sizes,\n dilations, strides, padding, data_format,\n is_depthwise=False, quantized=False):\n \"\"\" One iteration of convolution with given shapes and attributes \"\"\"\n\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n if quantized:\n data_array = np.random.uniform(0, 255, tensor_in_sizes).astype('uint8')\n filter_array = np.random.uniform(0, 255, filter_in_sizes).astype('uint8')\n else:\n data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32', name='in_data')\n in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32', name='in_filter')\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n\n if is_depthwise:\n out = nn_ops.depthwise_conv2d_native(in_data,\n in_filter,\n strides=strides,\n padding=padding,\n data_format=data_format)\n else:\n out = 
nn_ops.conv2d(in_data,\n in_filter,\n strides=strides,\n padding=padding,\n data_format=data_format)\n\n if quantized:\n # For now only quantized conv2d is supported\n assert not is_depthwise\n\n # Quantize the inputs and feed them to the convolution\n inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-100, max=100, name='inq_data')\n inq_filter = tf.quantization.fake_quant_with_min_max_args(in_filter, min=-100, max=100, name='inq_filter')\n out = nn_ops.conv2d(inq_data,\n inq_filter,\n strides=strides,\n padding=padding,\n data_format=data_format)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name=\"out\")\n\n # Set the input quantization range\n input_range = {'in_data': (-100, 100)} if quantized else None\n\n # Compare\n compare_tflite_with_tvm(data_array, 'in_data', [in_data], [out], quantized=quantized, input_range=input_range)\n else:\n data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')\n compare_tflite_with_tvm(data_array, 'in_data', [in_data], [out])\n\n\ndef test_forward_convolution():\n for quantized in [False, True]:\n _test_convolution([4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], 'SAME', 'NHWC', quantized=quantized)\n _test_convolution([4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], 'VALID', 'NHWC', quantized=quantized)\n _test_convolution([4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], 'SAME', 'NHWC', quantized=quantized)\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], 'VALID', 'NHWC', quantized=quantized)\n\n # depthwise convolution\n _test_convolution([4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)\n _test_convolution([4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n _test_convolution([4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], 'SAME', 'NHWC', True)\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n _test_convolution([4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], 'VALID', 'NHWC', True)\n # depthwise convolution with single input channel\n _test_convolution([1, 76, 64, 1], [9, 5, 1, 96], [1, 1], [1, 1], 'SAME', 'NHWC', True)\n\n\n#######################################################################\n# Transpose Convolution\n# ---------------------\n\ndef _test_transpose_conv(tensor_in_sizes, filter_in_sizes, output_shape, strides, padding):\n \"\"\" One iteration of transpose convolution with given shapes and attributes \"\"\"\n\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')\n in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')\n strides = [1] + strides + [1]\n # in_filter layout is HWOI\n out = nn_ops.conv2d_transpose(in_data,\n in_filter,\n output_shape=output_shape,\n strides=strides,\n padding=padding)\n data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')\n compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_transpose_conv():\n # kernel 3x3, padding VALID\n _test_transpose_conv([4, 32, 32, 16], [3, 3, 5, 16], [4, 34, 34, 5], [1, 1], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [3, 3, 
5, 16], [1, 65, 65, 5], [2, 2], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [3, 3, 5, 16], [1, 65, 34, 5], [2, 1], 'VALID')\n\n # kernel 2x2, padding VALID\n _test_transpose_conv([4, 32, 32, 16], [2, 2, 5, 16], [4, 33, 33, 5], [1, 1], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 64, 5], [2, 2], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 33, 5], [2, 1], 'VALID')\n\n # kernel 1x1, padding VALID\n _test_transpose_conv([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], 'VALID')\n _test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], 'VALID')\n\n # kernel 1x1, padding SAME\n _test_transpose_conv([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], 'SAME')\n _test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], 'SAME')\n _test_transpose_conv([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], 'SAME')\n\n\n#######################################################################\n# Reshape\n# -------\n\ndef _test_reshape(data, out_shape):\n \"\"\" One iteration of reshape operation with given data and out shape \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = array_ops.reshape(in_data, out_shape)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_reshape():\n _test_reshape(np.arange(6.0, dtype=np.float32), [2, 3])\n _test_reshape(np.arange(6), [-1, 2])\n _test_reshape(np.arange(6), [3, -1])\n _test_reshape(np.arange(6), [-1])\n\n\n#######################################################################\n# Resize\n# ------\n\ndef _test_resize(tf_resize_op, data, align_corners):\n \"\"\" One iteration of Resize \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n images_tensor = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')\n size = ops.convert_to_tensor(data[1], dtype=data[1].dtype)\n out_tensor = tf_resize_op(images=images_tensor, size=size, align_corners=align_corners)\n compare_tflite_with_tvm([data[0]], ['in:0'], [images_tensor], [out_tensor])\n\n\ndef test_all_resize():\n \"\"\" Resize \"\"\"\n data = [np.random.rand(1, 16, 16, 3).astype(\"float32\"), np.array([8, 8], dtype=np.int32)]\n ### RESIZE_BILINEAR\n _test_resize(tf.image.resize_bilinear, data, align_corners=False)\n _test_resize(tf.image.resize_bilinear, data, align_corners=True)\n ### RESIZE_NEAREST_NEIGHBOR (was added in v1.13)\n # According to topi resize.h\n # Align corners not supported for nearest neighbour\n from tflite.BuiltinOperator import BuiltinOperator\n if 'RESIZE_NEAREST_NEIGHBOR' in dir(BuiltinOperator()):\n _test_resize(tf.image.resize_nearest_neighbor, data, align_corners=False)\n\n#######################################################################\n# Range\n# -----\ndef _test_range(start, limit, delta):\n # tflite 1.13 convert method does not accept empty shapes\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n tf.reset_default_graph()\n with tf.Graph().as_default():\n start_scalar, limit_scalar, delta_scalar = \\\n tf.placeholder(dtype=start.dtype, shape=(), name=\"start\"), \\\n tf.placeholder(dtype=limit.dtype, shape=(), name=\"limit\"), \\\n tf.placeholder(dtype=delta.dtype, shape=(), name=\"delta\")\n\n out = tf.range(start_scalar, limit_scalar, delta_scalar, name=\"range\")\n\n 
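# Editorial note: tf.range yields a data-dependent output shape, which is\n # why the comparison below goes through the Relay VM (mode=\"vm\") instead\n # of the static graph runtime.\n 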
compare_tflite_with_tvm(\n [start, limit, delta],\n [\"start\", \"limit\", \"delta\"],\n [start_scalar, limit_scalar, delta_scalar],\n [out],\n mode=\"vm\",\n quantized=False\n )\n\ndef _test_range_default():\n # tflite 1.13 convert method does not accept empty shapes\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n tf.reset_default_graph()\n with tf.Graph().as_default():\n inputs = [\n tf.placeholder(dtype=tf.int32, shape=(), name=\"p1\"),\n tf.placeholder(dtype=tf.int32, shape=(), name=\"p2\")\n ]\n outputs = [\n tf.range(start = inputs[0], limit = inputs[1]), # use default delta\n tf.range(start = inputs[1]) # use start as limit with 0 as the first item in the range\n ]\n\n compare_tflite_with_tvm(\n [np.int32(1), np.int32(18)],\n [\"p1\", \"p2\"],\n inputs,\n outputs,\n mode=\"vm\"\n )\n\ndef test_forward_range():\n _test_range(np.int32(1), np.int32(18), np.int32(3))\n _test_range(np.int32(1), np.int32(18), np.float32(3.1)) # increment is of type float\n _test_range(np.float32(1.0), np.int32(18), np.int32(3.1)) # start is of type float\n _test_range_default()\n\n#######################################################################\n# Shape\n# -----\ndef test_forward_shape():\n # tflite 1.13 convert method does not accept empty shapes\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n tf.reset_default_graph()\n with tf.Graph().as_default():\n data = np.array([1, 18, 3], dtype=np.int32)\n start = tf.placeholder(dtype=tf.int32, shape=[], name=\"start\")\n limit = tf.placeholder(dtype=tf.int32, shape=[], name=\"limit\")\n delta = tf.placeholder(dtype=tf.int32, shape=[], name=\"delta\")\n r = tf.range(start, limit, delta, tf.int32, name=\"range\")\n out = tf.shape(r, out_type=tf.dtypes.int32)\n compare_tflite_with_tvm(\n [x for x in np.nditer(data)],\n [\"start\", \"limit\", \"delta\"],\n [start, limit, delta],\n [out],\n mode=\"vm\"\n )\n\n#######################################################################\n# Concatenation\n# -------------\n\ndef _test_concatenation(data, axis):\n \"\"\" One iteration of concatenation \"\"\"\n\n assert len(data) >= 1\n\n with tf.Graph().as_default():\n in_data = [\n array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=\"in_{}\".format(idx))\n for idx, tensor in enumerate(data)]\n out = array_ops.concat(in_data, axis=axis)\n name = [\"in_{}:0\".format(idx) for idx in range(len(data))]\n\n compare_tflite_with_tvm(data, name, in_data, [out])\n\n\ndef test_forward_concatenation():\n\n _test_concatenation(\n [np.arange(6).reshape((1, 2, 1, 3)),\n np.arange(6).reshape((1, 2, 1, 3))], 1)\n\n _test_concatenation(\n [np.arange(6).reshape((3, 2)),\n np.arange(6).reshape((3, 2))], 1)\n\n _test_concatenation(\n [np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3))], 1)\n\n#######################################################################\n# Unary elemwise\n# --------------\n\ndef _test_unary_elemwise(math_op, data):\n \"\"\" One iteration of unary elemwise \"\"\"\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name='in')\n out = math_op(in_data)\n compare_tflite_with_tvm(data, ['in:0'], [in_data], [out])\n\n#######################################################################\n# Abs\n# ---\n\ndef _test_abs(data):\n \"\"\" One iteration of abs \"\"\"\n return _test_unary_elemwise(math_ops.abs, data)\n#######################################################################\n# 
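Editorial usage note: each of these one-line wrappers feeds a single\n# placeholder through one math_op; a hypothetical standalone check could\n# look like _test_abs(np.array([-1.5, 0.0, 2.5], dtype=np.float32)).\n#######################################################################\n# 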
Ceil\n# ----\n\ndef _test_ceil(data):\n \"\"\" One iteration of ceil \"\"\"\n return _test_unary_elemwise(math_ops.ceil, data)\n#######################################################################\n# Floor\n# -----\n\ndef _test_floor(data):\n \"\"\" One iteration of floor \"\"\"\n return _test_unary_elemwise(math_ops.floor, data)\n\n#######################################################################\n# Round\n# -----\n\ndef _test_round(data):\n \"\"\" One iteration of round \"\"\"\n return _test_unary_elemwise(math_ops.round, data)\n\n#######################################################################\n# Exp\n# ---\n\ndef _test_exp(data):\n \"\"\" One iteration of exp \"\"\"\n return _test_unary_elemwise(math_ops.exp, data)\n#######################################################################\n# Log\n# ---\n\ndef _test_log(data):\n \"\"\" One iteration of log \"\"\"\n return _test_unary_elemwise(math_ops.log, data)\n#######################################################################\n# Sin\n# ---\n\ndef _test_sin(data):\n \"\"\" One iteration of sin \"\"\"\n return _test_unary_elemwise(math_ops.sin, data)\n#######################################################################\n# Cos\n# ---\n\ndef _test_cos(data):\n \"\"\" One iteration of cos \"\"\"\n return _test_unary_elemwise(math_ops.cos, data)\n#######################################################################\n# Tan\n# ---\n\ndef _test_tan(data):\n \"\"\" One iteration of tan \"\"\"\n return _test_unary_elemwise(math_ops.tan, data)\n#######################################################################\n# Sqrt\n# ----\n\ndef _test_sqrt(data):\n \"\"\" One iteration of sqrt \"\"\"\n return _test_unary_elemwise(math_ops.sqrt, data)\n#######################################################################\n# Rsqrt\n# -----\n\ndef _test_rsqrt(data):\n \"\"\" One iteration of rsqrt \"\"\"\n return _test_unary_elemwise(math_ops.rsqrt, data)\n#######################################################################\n# Neg\n# ---\n\ndef _test_neg(data):\n \"\"\" One iteration of neg \"\"\"\n return _test_unary_elemwise(math_ops.neg, data)\n#######################################################################\n# Square\n# ------\n\ndef _test_square(data):\n \"\"\" One iteration of square \"\"\"\n return _test_unary_elemwise(math_ops.square, data)\n\n#######################################################################\n# Elu\n# ---\n\ndef _test_elu(data):\n \"\"\" One iteration of elu \"\"\"\n return _test_unary_elemwise(nn_ops.elu, data)\n\ndef _test_forward_unary_elemwise(test_op):\n # functions that need positive input\n if test_op.__name__ in {'_test_log', '_test_sqrt', '_test_rsqrt'}:\n test_op(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)))\n else:\n test_op(np.random.uniform(-10, 10, (3, 2)).astype(np.float32))\n\ndef test_all_unary_elemwise():\n _test_forward_unary_elemwise(_test_abs)\n _test_forward_unary_elemwise(_test_floor)\n _test_forward_unary_elemwise(_test_exp)\n _test_forward_unary_elemwise(_test_log)\n _test_forward_unary_elemwise(_test_sin)\n _test_forward_unary_elemwise(_test_sqrt)\n _test_forward_unary_elemwise(_test_rsqrt)\n _test_forward_unary_elemwise(_test_neg)\n _test_forward_unary_elemwise(_test_square)\n # ceil and cos come with TFLite 1.14.0.post1 fbs schema\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_forward_unary_elemwise(_test_ceil)\n _test_forward_unary_elemwise(_test_cos)\n _test_forward_unary_elemwise(_test_round)\n # This fails with 
TF and Tflite 1.15.2, this could not have been tested\n # in CI or anywhere else. The failure mode is that we see a backtrace\n # from the converter that we need to provide a custom Tan operator\n # implementation.\n #_test_forward_unary_elemwise(_test_tan)\n _test_forward_unary_elemwise(_test_elu)\n\n#######################################################################\n# Element-wise\n# ------------\n\ndef _test_elemwise(math_op, data, fused_activation_function=None, quantized=False, qnn_op=None):\n \"\"\" One iteration of elemwise \"\"\"\n\n assert len(data) == 2\n\n # Test with two tensors\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in_0'),\n array_ops.placeholder(shape=data[1].shape, dtype='float32', name='in_1')]\n\n if quantized:\n # fake_quant will keep the tensors in float32 until the conversion in the session\n inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name=\"inq_0\"),\n tf.quantization.fake_quant_with_min_max_args(in_data[1], min=-50, max=50, name=\"inq_1\")]\n input_range = {'inq_0': (-100, 100), 'inq_1': (-50, 50)}\n out = math_op(inq_data[0], inq_data[1])\n out = with_fused_activation_function(out, fused_activation_function)\n # set the fp32 output range with respect to the operation\n out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name=\"out\")\n compare_tflite_with_tvm(data, ['inq_0:0', 'inq_1:0'], inq_data, [out],\n quantized=True, input_range=input_range)\n else:\n out = math_op(in_data[0], in_data[1])\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in_0')]\n\n if quantized:\n inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name=\"inq_0\")]\n inq_const = tf.quantization.fake_quant_with_min_max_args(data[1], min=-50, max=50, name=\"const_tensor\")\n input_range = {'inq_0': (-100, 100)}\n # the 2nd tensor is treated as constant and directly added as part of the operation\n out = math_op(inq_data, ops.convert_to_tensor(inq_const, dtype='float32', name='inq_const'))\n out = with_fused_activation_function(out, fused_activation_function)\n out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name=\"out\")\n compare_tflite_with_tvm(data[0], ['inq_0:0'], inq_data, [out], quantized=True, input_range=input_range)\n else:\n out = math_op(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype))\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm(data[0], ['in_0:0'], in_data, [out])\n\n # Test with constant and tensor\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[1].shape, dtype='float32', name='in_1')]\n\n if quantized:\n inq_const = tf.quantization.fake_quant_with_min_max_args(data[0], min=-100, max=100, name=\"const_tensor\")\n inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-50, max=50, name=\"inq_1\")]\n input_range = {'inq_1': (-50, 50)}\n # the 1st tensor is treated as constant and directly added as part of the operation\n out = math_op(ops.convert_to_tensor(inq_const, dtype='float32', name='inq_const'), 
inq_data)\n out = with_fused_activation_function(out, fused_activation_function)\n out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=out_min, max=out_max, name=\"out\")\n compare_tflite_with_tvm(data[1], ['inq_1:0'], inq_data, [out], quantized=True, input_range=input_range)\n else:\n out = math_op(ops.convert_to_tensor(data[0], dtype=data[0].dtype), in_data[0])\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm(data[1], ['in_1:0'], in_data, [out])\n\n#######################################################################\n# Add\n# ---\n\ndef _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):\n \"\"\" One iteration of add \"\"\"\n return _test_elemwise(math_ops.add, data, fused_activation_function, quantized, qnn_op)\n\n#######################################################################\n# Subtract\n# --------\n\ndef _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None):\n \"\"\" One iteration of subtract \"\"\"\n return _test_elemwise(math_ops.subtract, data, fused_activation_function, quantized, qnn_op)\n#######################################################################\n# Mul\n# ---\n\ndef _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None):\n \"\"\" One iteration of mul \"\"\"\n return _test_elemwise(math_ops.multiply, data, fused_activation_function, quantized, qnn_op)\n\n#######################################################################\n# Divide\n# ------\n\ndef _test_div(data, fused_activation_function=None):\n \"\"\" One iteration of divide \"\"\"\n return _test_elemwise(math_ops.divide, data, fused_activation_function)\n#######################################################################\n# Power\n# -----\n\ndef _test_pow(data):\n \"\"\" One iteration of power \"\"\"\n return _test_elemwise(math_ops.pow, data)\n#######################################################################\n# Maximum\n# -------\n\ndef _test_maximum(data):\n \"\"\" One iteration of maximum \"\"\"\n return _test_elemwise(math_ops.maximum, data)\n#######################################################################\n# Minimum\n# -------\n\ndef _test_minimum(data):\n \"\"\" One iteration of minimum \"\"\"\n return _test_elemwise(math_ops.minimum, data)\n#######################################################################\n# Greater\n# -------\n\ndef _test_greater(data):\n \"\"\" One iteration of greater \"\"\"\n return _test_elemwise(math_ops.greater, data)\n#######################################################################\n# Greater_equal\n# -------------\n\ndef _test_greater_equal(data):\n \"\"\" One iteration of greater_equal \"\"\"\n return _test_elemwise(math_ops.greater_equal, data)\n#######################################################################\n# Less\n# ----\n\ndef _test_less(data):\n \"\"\" One iteration of less \"\"\"\n return _test_elemwise(math_ops.less, data)\n#######################################################################\n# Less_equal\n# ----------\n\ndef _test_less_equal(data):\n \"\"\" One iteration of less_equal \"\"\"\n return _test_elemwise(math_ops.less_equal, data)\n#######################################################################\n# Equal\n# -----\n\ndef _test_equal(data):\n \"\"\" One iteration of equal \"\"\"\n return _test_elemwise(math_ops.equal, data)\n#######################################################################\n# Not_equal\n# 
---------\n\ndef _test_not_equal(data):\n \"\"\" One iteration of not_equal\"\"\"\n return _test_elemwise(math_ops.not_equal, data)\n#######################################################################\n# Squared_difference\n# ------------------\n\ndef _test_squared_difference(data):\n \"\"\" One iteration of squared difference \"\"\"\n return _test_elemwise(math_ops.squared_difference, data)\n\n#######################################################################\n# Floor_divide\n# ------------\n\ndef _test_floor_divide(data):\n \"\"\" One iteration of floor_div\"\"\"\n return _test_elemwise(math_ops.floordiv, data)\n\n#######################################################################\n# Floor_mod\n# ---------\n\ndef _test_floor_mod(data):\n \"\"\" One iteration of floor_mod\"\"\"\n return _test_elemwise(math_ops.floormod, data)\n\ndef _test_forward_elemwise(testop):\n \"\"\" Elewise\"\"\"\n testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),\n np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3))])\n testop([np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),\n np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3))])\n testop([np.arange(3.0, dtype=np.float32).reshape((1, 3)),\n np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3))])\n\ndef _test_forward_elemwise_quantized(testop):\n testop([np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),\n np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8)], quantized=True, qnn_op=testop)\n\ndef _test_elemwise_qnn_out_range(qnn_op):\n # set the fake_quant output range with respect to the input tensors float32 range\n qnn_out_range = {\n _test_add: (-150, 150),\n _test_sub: (-150, 150),\n _test_mul: (-5e+3, 5e+3),\n }\n\n return qnn_out_range[qnn_op]\n\ndef test_all_elemwise():\n _test_forward_elemwise(_test_add)\n _test_forward_elemwise_quantized(_test_add)\n _test_forward_elemwise(partial(_test_add, fused_activation_function=\"RELU\"))\n # this is broken with tf upgrade 1.15.2 and hits a segfault that needs\n # further investigation.\n # _test_forward_elemwise(partial(_test_add, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_sub)\n _test_forward_elemwise_quantized(_test_sub)\n _test_forward_elemwise(partial(_test_sub, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_sub, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_mul)\n _test_forward_elemwise_quantized(_test_mul)\n _test_forward_elemwise(partial(_test_mul, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_mul, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_div)\n _test_forward_elemwise(partial(_test_div, fused_activation_function=\"RELU\"))\n _test_forward_elemwise(partial(_test_div, fused_activation_function=\"RELU6\"))\n _test_forward_elemwise(_test_pow)\n _test_forward_elemwise(_test_maximum)\n _test_forward_elemwise(_test_minimum)\n _test_forward_elemwise(_test_greater)\n _test_forward_elemwise(_test_squared_difference)\n _test_forward_elemwise(_test_greater_equal)\n _test_forward_elemwise(_test_less)\n _test_forward_elemwise(_test_less_equal)\n _test_forward_elemwise(_test_equal)\n _test_forward_elemwise(_test_not_equal)\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_forward_elemwise(_test_floor_divide)\n _test_forward_elemwise(_test_floor_mod)\n\n\n#######################################################################\n# AddN\n# ----\n\n\ndef _test_forward_add_n(inputs):\n 
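# Editorial note: one placeholder is created per input array and all of\n # them feed a single tf.add_n node.\n 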
tf.reset_default_graph()\n with tf.Graph().as_default():\n temp = []\n for each in inputs:\n temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))\n output = tf.add_n(temp)\n compare_tflite_with_tvm([each for each in inputs], [each.name for each in temp],\n [each for each in temp], [output])\n\n\ndef test_forward_add_n():\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)\n m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)\n in0 = x\n in1 = [x, y]\n in2 = (x, y, z)\n in3 = m\n in4 = [m, n]\n in5 = (m, n, o)\n _test_forward_add_n(in0)\n _test_forward_add_n(in1)\n _test_forward_add_n(in2)\n _test_forward_add_n(in3)\n _test_forward_add_n(in4)\n _test_forward_add_n(in5)\n\n\n#######################################################################\n# Logical operators\n# -----------------\n\ndef _test_logical_binary(logical_bin_op, data):\n\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype='bool', name='in_0'),\n array_ops.placeholder(shape=data[1].shape, dtype='bool', name='in_1')]\n if logical_bin_op == math_ops.logical_not:\n out = math_ops.logical_or(in_data[0], in_data[1], name='out1')\n out = logical_bin_op(out, name='out')\n else:\n out = logical_bin_op(in_data[0], in_data[1], name='out')\n\n compare_tflite_with_tvm(data, ['in_0:0', 'in_1:0'], in_data, [out])\n\ndef _test_forward_logical_and(data):\n \"\"\" One iteration of logical and \"\"\"\n return _test_logical_binary(math_ops.logical_and, data)\n\ndef _test_forward_logical_or(data):\n \"\"\" One iteration of logical or \"\"\"\n return _test_logical_binary(math_ops.logical_or, data)\n\ndef _test_forward_logical_not(data):\n \"\"\" One iteration of logical not \"\"\"\n return _test_logical_binary(math_ops.logical_not, data)\n\ndef test_all_logical():\n data = [np.random.choice(a=[False, True], size=(2, 3, 4)).astype('bool'),\n np.random.choice(a=[False, True], size=(2, 3, 4)).astype('bool')]\n # boolean dtype is not supported by older versions than TFLite 1.15.0\n if package_version.parse(tf.VERSION) >= package_version.parse('1.15.0'):\n _test_forward_logical_and(data)\n _test_forward_logical_or(data)\n _test_forward_logical_not(data)\n\n#######################################################################\n# Zeros like\n# ----------\n\ndef _test_zeros_like(data):\n \"\"\" One iteration of ZEROS LIKE \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = gen_array_ops.zeros_like(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_zeros_like():\n \"\"\" ZEROS LIKE \"\"\"\n _test_zeros_like(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n\n\n#######################################################################\n# Fill\n# ----\n\ndef _test_fill(dims, value_data, value_dtype):\n \"\"\" Use the fill op to create a tensor of value_data with constant dims.\"\"\"\n\n value_data = np.array(value_data, dtype=value_dtype)\n # TF 1.13 TFLite convert method does not accept empty shapes\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n with tf.Graph().as_default():\n value = array_ops.placeholder(dtype=value_dtype, name=\"value\", shape=[])\n out = tf.fill(dims, value)\n compare_tflite_with_tvm([value_data], [\"value\"], 
[value], [out])\n\n with tf.Graph().as_default():\n input1 = array_ops.placeholder(dtype=value_dtype, name=\"input1\", shape=dims)\n # Fill op gets converted to static tensor during conversion\n out = tf.fill(dims, value_data)\n out1 = tf.add(out, input1)\n input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)\n compare_tflite_with_tvm([input1_data], [\"input1\"], [input1], [out1])\n\n\ndef test_forward_fill():\n \"\"\" Test FILL op \"\"\"\n\n _test_fill((1, 2, 2, 4), 5, \"int32\")\n _test_fill((1, 2, 2, 4), 5, \"float32\")\n _test_fill((5, ), 5, \"int32\")\n\n\n#######################################################################\n# Reduce\n# ------\n\ndef _test_reduce(math_op, data, keep_dims=None):\n \"\"\" One iteration of reduce \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name='in')\n out = math_op(in_data, data[1], keep_dims)\n compare_tflite_with_tvm([data[0]], ['in:0'], [in_data], [out])\n\ndef _test_reduce_quantize(math_op, data, keep_dims=None):\n \"\"\" One iteration of reduce \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype=\"float32\", name='in')]\n inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0], min=-100, max=100, name=\"inq_0\")]\n input_range = {'inq_0': (-100, 100)}\n out = math_op(inq_data, data[1], keep_dims)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name=\"out\")\n compare_tflite_with_tvm([data[0]], ['inq_0:0'], [inq_data[0]], [out], quantized=True, input_range=input_range)\n\n\n#######################################################################\n# Reduce_min\n# ----------\n\ndef _test_reduce_min(data, keep_dims=None):\n \"\"\" One iteration of reduce_min \"\"\"\n return _test_reduce(math_ops.reduce_min, data, keep_dims)\n\n#######################################################################\n# Reduce_max\n# ----------\n\ndef _test_reduce_max(data, keep_dims=None):\n \"\"\" One iteration of reduce_max \"\"\"\n return _test_reduce(math_ops.reduce_max, data, keep_dims)\n\n#######################################################################\n# Reduce_mean\n# -----------\n\ndef _test_reduce_mean(data, keep_dims=None, quantized=False):\n \"\"\" One iteration of reduce_mean \"\"\"\n if quantized:\n return _test_reduce_quantize(math_ops.reduce_mean, data, keep_dims)\n else:\n return _test_reduce(math_ops.reduce_mean, data, keep_dims)\n\n#######################################################################\n# Reduce_prod\n# -----------\n\ndef _test_reduce_prod(data, keep_dims=None):\n \"\"\" One iteration of reduce_prod \"\"\"\n return _test_reduce(math_ops.reduce_prod, data, keep_dims)\n\n#######################################################################\n# Reduce_sum\n# -----------\n\ndef _test_reduce_sum(data, keep_dims=None):\n \"\"\" One iteration of reduce_sum \"\"\"\n return _test_reduce(math_ops.reduce_sum, data, keep_dims)\n\n#######################################################################\n# Reduce_any\n# ----------\n\ndef _test_reduce_any(data, keep_dims=None):\n \"\"\" One iteration of reduce_any \"\"\"\n return _test_reduce(math_ops.reduce_any, data, keep_dims)\n\ndef _test_forward_reduce(testop, dtype=\"float32\"):\n \"\"\" Reduce \"\"\"\n if dtype == 'bool':\n data0 = [np.random.choice(a=[False, True], size=(16, 16, 16, 
16)).astype(dtype),\n None]\n data1 = [np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype),\n np.array([1, 2], dtype=np.int32)]\n else:\n data0 = [np.random.rand(16, 16, 16, 16).astype(dtype), None]\n data1 = [np.random.rand(16, 16, 16, 16).astype(dtype), np.array([1, 2], dtype=np.int32)]\n testop(data0)\n testop(data0, keep_dims=False)\n testop(data0, keep_dims=True)\n testop(data1)\n testop(data1, keep_dims=False)\n testop(data1, keep_dims=True)\n\ndef _test_forward_reduce_quantized(testop):\n data0 = [np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8), np.array([1, 2], dtype=np.int32)]\n testop(data0, quantized=True)\n testop(data0, keep_dims=False, quantized=True)\n testop(data0, keep_dims=True, quantized=True)\n\ndef test_all_reduce():\n _test_forward_reduce(_test_reduce_min)\n _test_forward_reduce(_test_reduce_max)\n _test_forward_reduce(_test_reduce_mean)\n _test_forward_reduce_quantized(_test_reduce_mean)\n _test_forward_reduce(_test_reduce_prod)\n _test_forward_reduce(_test_reduce_sum)\n if package_version.parse(tf.VERSION) >= package_version.parse('1.15.0'):\n _test_forward_reduce(_test_reduce_any, dtype=\"bool\")\n\n\n#######################################################################\n# Select, Where\n# -------------\n\ndef test_forward_select():\n with tf.Graph().as_default():\n with tf.Session() as sess:\n input1 = tf.placeholder(\n tf.int32, shape=[1, 4, 4, 3], name='input1')\n input2 = tf.placeholder(\n tf.int32, shape=[1, 4, 4, 3], name='input2')\n mask = input1 > input2\n out = tf.where(mask, input1 + 1, input2 * 2)\n in_data1 = np.random.uniform(\n 0, 10, size=(1, 4, 4, 3)).astype(\"int32\")\n in_data2 = np.random.uniform(\n 0, 10, size=(1, 4, 4, 3)).astype(\"int32\")\n\n compare_tflite_with_tvm([in_data1, in_data2], [\n 'input1:0', 'input2:0'], [input1, input2], [out])\n\n\n# Squeeze\n# -------\n\ndef _test_squeeze(data, squeeze_dims=None):\n \"\"\" One iteration of squeeze \"\"\"\n\n if squeeze_dims is None:\n squeeze_dims = []\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n\n if squeeze_dims:\n out = array_ops.squeeze(in_data, squeeze_dims)\n else:\n out = array_ops.squeeze(in_data)\n\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_squeeze():\n \"\"\" Squeeze \"\"\"\n _test_squeeze(np.arange(6).reshape((1, 2, 1, 3)), [0, 2])\n _test_squeeze(np.arange(6).reshape((2, 1, 3, 1)), [1, 3])\n\n\n#######################################################################\n# Quantize/DeQuantize\n# -------------------\n\ndef _test_quantize_dequantize(data):\n \"\"\" One iteration of quantize and dequantize \"\"\"\n\n # Define a dummy model\n data_in = tf.keras.layers.Input(shape=data.shape[1:])\n act_func = tf.keras.layers.Activation('linear')\n keras_model = tf.keras.models.Model(data_in, act_func(data_in))\n\n # Load the model\n converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)\n\n # To create quantized values with dynamic range of activations, needs representative dataset\n def representative_data_gen():\n for i in range(100):\n yield [data]\n\n converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]\n converter.representative_dataset = representative_data_gen\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = tf.uint8\n converter.inference_output_type = tf.uint8\n\n # Convert the model to TensorFlow Lite format\n tflite_model_quant = 
converter.convert()\n\n tflite_output = run_tflite_graph(tflite_model_quant, data)\n tvm_output = run_tvm_graph(tflite_model_quant, data, 'input_1')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n\ndef test_forward_quantize_dequantize():\n \"\"\" Quantize Dequantize \"\"\"\n data = np.random.uniform(0, 1, (1, 4, 4, 3)).astype(\"float32\")\n if package_version.parse(tf.VERSION) >= package_version.parse('2.0.0'):\n _test_quantize_dequantize(data)\n\n\n#######################################################################\n# Pad\n# ---\n\ndef _test_pad(data, mode=\"CONSTANT\", quantized=False):\n \"\"\" One iteration of PAD \"\"\"\n\n assert len(data) == 2\n\n # Test with tensor and constant\n with tf.Graph().as_default():\n in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')]\n\n if quantized:\n # fake_quant will keep the tensors in float32 until the conversion in the session\n input_range = {'inq_0': (-100, 100)}\n inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0],\n min=-100,\n max=100,\n name=\"inq_0\")]\n out = array_ops.pad(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)\n compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True,\n input_range=input_range)\n else:\n out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)\n compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out])\n\n\ndef test_forward_pad():\n \"\"\" Pad \"\"\"\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),\n np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),\n np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)])\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)], mode=\"REFLECT\")\n _test_pad([np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)], mode=\"SYMMETRIC\")\n _test_pad([np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),\n np.array([[1, 1], [2, 2]], dtype=np.int32)], quantized=True)\n\n\n#######################################################################\n# Pack\n# ----\n\ndef _test_pack(data, axis):\n \"\"\" One iteration of pack \"\"\"\n\n assert len(data) >= 1\n\n with tf.Graph().as_default():\n in_data = [\n array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=\"in_{}\".format(idx))\n for idx, tensor in enumerate(data)]\n out = array_ops.pack(in_data, axis=axis)\n name = [\"in_{}:0\".format(idx) for idx in range(len(data))]\n\n compare_tflite_with_tvm(data, name, in_data, [out])\n\n\ndef test_forward_pack():\n \"\"\" Pack \"\"\"\n _test_pack(\n [np.arange(6).reshape((1, 2, 1, 3)),\n np.arange(6).reshape((1, 2, 1, 3))], 1)\n\n _test_pack(\n [np.arange(6).reshape((3, 2)),\n np.arange(6).reshape((3, 2))], 1)\n\n _test_pack(\n [np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3)),\n np.arange(6).reshape((2, 1, 1, 3))], 1)\n\n\n#######################################################################\n# Unpack\n# ------\n\ndef _test_unpack(data, axis, num_unpacks):\n \"\"\" One iteration of UNPACK \"\"\"\n with 
tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = gen_array_ops.unpack(in_data, num=num_unpacks, axis=axis, name='unpack')\n out_names = ['out_' + str(n) + ':0' for n in range(num_unpacks)]\n compare_tflite_with_tvm([data], 'Placeholder:0', [in_data], out, out_names=out_names)\n\ndef test_forward_unpack():\n \"\"\" UNPACK \"\"\"\n _test_unpack(np.array(np.random.uniform(0, 5, (3, 1)), dtype=np.int32), axis=1, num_unpacks=1)\n _test_unpack(np.array(np.random.uniform(0, 5, (3, 4)), dtype=np.float32), axis=0, num_unpacks=3)\n # tflite 1.13 doesn't accept negative axis\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_unpack(np.array(np.random.uniform(0, 5, (3, 6)), dtype=np.int32), axis=-2, num_unpacks=3)\n _test_unpack(np.array(np.random.uniform(0, 5, (2, 3, 4)), dtype=np.int32), axis=-3, num_unpacks=2)\n\n\n#######################################################################\n# Local response normalization\n# ----------------------------\n\ndef _test_local_response_normalization(data, depth_radius, bias, alpha, beta):\n \"\"\" One iteration of LOCAL_RESPONSE_NORMALIZATION \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')\n out = nn_ops.local_response_normalization(in_data, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)\n compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])\n\ndef test_forward_local_response_normalization():\n \"\"\" LOCAL_RESPONSE_NORMALIZATION \"\"\"\n data = np.random.uniform(size=(1, 6, 4, 3)).astype('float32')\n # LOCAL_RESPONSE_NORMALIZATION come with TFLite >= 1.14.0 fbs schema\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n _test_local_response_normalization(data, depth_radius=5, bias=1, alpha=1, beta=0.5)\n\n\n#######################################################################\n# L2 normalization\n# ----------------\n\ndef _test_l2_normalization(data, axis, fused_activation_function=None):\n \"\"\" One iteration of L2_NORMALIZATION \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = nn_impl.l2_normalize(in_data, axis)\n out = with_fused_activation_function(out, fused_activation_function)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_l2_normalization():\n \"\"\" L2_NORMALIZATION \"\"\"\n data = np.random.uniform(size=(3, 6, 4)).astype('float32')\n _test_l2_normalization(data, axis=2)\n _test_l2_normalization(data, axis=2, fused_activation_function=\"RELU\")\n\n#######################################################################\n# Logistic\n# --------\n\ndef _test_logistic(data, quantized=False):\n \"\"\" One iteration of LOGISTIC \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype='float32', name='in_0')\n\n if quantized:\n inq_data = tf.quantization.fake_quant_with_min_max_args(in_data, min=-5, max=5, name=\"inq_0\")\n input_range = {'inq_0': (-5, 5)}\n out = math_ops.sigmoid(inq_data)\n out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=1, name=\"out\")\n compare_tflite_with_tvm(data, 'inq_0:0', [inq_data], [out], quantized=True, input_range=input_range)\n else:\n out = math_ops.sigmoid(in_data)\n compare_tflite_with_tvm(data, 'in_0:0', [in_data], [out])\n\ndef test_forward_logistic():\n \"\"\" LOGISTIC \"\"\"\n _test_logistic(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n 
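# the fake_quant above pins the quantized output range to (0, 1), matching\n # the codomain of the sigmoid (editorial note)\n 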
_test_logistic(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True)\n\n#######################################################################\n# Softmax\n# -------\n\ndef _test_softmax(data):\n \"\"\" One iteration of softmax \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = nn_ops.softmax(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_softmax():\n \"\"\" Softmax \"\"\"\n _test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n\n#######################################################################\n# Tanh\n# ----\n\ndef _test_tanh(data):\n \"\"\" One iteration of TANH \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = math_ops.tanh(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_tanh():\n \"\"\" TANH \"\"\"\n _test_tanh(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n\n#######################################################################\n# ReLu\n# ----\n\ndef _test_relu(data):\n \"\"\" One iteration of ReLU \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = nn_ops.relu(in_data)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_relu():\n \"\"\" ReLU \"\"\"\n _test_relu(np.arange(6.0, dtype=np.float32).reshape((1, 6)))\n\ndef _test_prelu(data, alpha):\n \"\"\" One iteration of PReLU \"\"\"\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n # This specific pattern will be replaced into PRelu by tflite\n out = nn_ops.relu(in_data) + (-alpha * nn_ops.relu(-in_data))\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_prelu():\n \"\"\" PReLU \"\"\"\n _test_prelu(np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype(\"float32\"), np.full((3,), 0.2, dtype=\"float32\"))\n _test_prelu(np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype(\"float32\"), np.full((1, 1, 3), 0.2, dtype=\"float32\"))\n\n#######################################################################\n# DepthToSpace\n# ------------\n\ndef _test_depthtospace(data, block_size):\n \"\"\" One iteration of depth_to_space operation with given data and block size \"\"\"\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = array_ops.depth_to_space(in_data, block_size)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_depthtospace():\n # DEPTH_TO_SPACE comes with TFLite >= 1.15.0 fbs schema\n if package_version.parse(tf.VERSION) >= package_version.parse('1.15.0'):\n _test_depthtospace(np.random.normal(size=[1, 32, 32, 4]).astype(\"float32\"), 2)\n _test_depthtospace(np.random.normal(size=[1, 16, 8, 32]).astype(\"float32\"), 4)\n\n#######################################################################\n# SpaceToDepth\n# ------------\n\ndef _test_spacetodepth(data, block_size):\n \"\"\" One iteration of space_to_depth operation with given data and block size \"\"\"\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)\n out = array_ops.space_to_depth(in_data, block_size)\n compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])\n\ndef test_forward_spacetodepth():\n _test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]).astype(\"float32\"), 2)\n 
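# block_size must evenly divide both spatial dimensions (editorial note)\n 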
_test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]).astype(\"float32\"), 4)\n\n#######################################################################\n# Sparse To Dense\n# ---------------\n\ndef _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):\n # tflite 1.13 convert method does not accept empty shapes\n if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):\n with tf.Graph().as_default():\n indices = tf.placeholder(shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name=\"indices\")\n values = tf.placeholder(shape=sparse_values.shape, dtype=str(sparse_values.dtype), name=\"values\")\n oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))\n\n if default_value is None:\n output = tf.sparse_to_dense(indices, oshape, values)\n compare_tflite_with_tvm(\n [sparse_indices, sparse_values],\n [\"indices\", \"values\"],\n [indices, values],\n [output]\n )\n else:\n dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name=\"default_value\")\n output = tf.sparse_to_dense(indices, oshape, values, dv)\n compare_tflite_with_tvm(\n [sparse_indices, sparse_values, default_value],\n [\"indices\", \"values\", \"default_value\"],\n [indices, values, dv],\n [output]\n )\n\ndef test_forward_sparse_to_dense():\n '''\n Works in tvm/topi/tensorflow. But tflite converter breaks this test case\n _test_sparse_to_dense(\n np.int32(1),\n np.int32(3),\n np.int32(0),\n np.array([5]).astype(\"int32\")\n )\n '''\n # vector\n _test_sparse_to_dense(\n np.array([0, 1, 4]).astype(\"int32\"),\n np.array([3, 3, 3]).astype(\"int32\"),\n np.int32(0),\n np.array([5]).astype(\"int32\")\n )\n # vector nXd\n _test_sparse_to_dense(\n np.array([[0, 0], [1, 2]]).astype(\"int32\"),\n np.array([1, 2]).astype(\"int32\"),\n np.int32(0),\n np.array([3, 4]).astype(\"int32\")\n )\n _test_sparse_to_dense(\n np.array([[0, 0, 0], [1, 2, 3]]).astype(\"int32\"),\n np.array([1, 2]).astype(\"int32\"),\n np.int32(4),\n np.array([2, 3, 4]).astype(\"int32\")\n )\n # floats\n _test_sparse_to_dense(\n np.array([0, 1, 4]).astype(\"int32\"),\n np.array([3.1, 3.1, 3.1]).astype(\"float32\"),\n np.float32(3.5),\n np.array([5]).astype(\"int32\")\n )\n # default value not specified\n _test_sparse_to_dense(\n np.array([0, 1, 4]).astype(\"int32\"),\n np.array([3.1, 3.1, 3.1]).astype(\"float32\"),\n None,\n np.array([5]).astype(\"int32\")\n )\n\n#######################################################################\n# Fully Connected\n# ---------------\n\ndef _test_fully_connected(tensor_in_sizes, filter_in_sizes, bias_in_size=None):\n \"\"\" One iteration of fully connected \"\"\"\n\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]\n filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]\n assert int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0], \\n \"input size and filter size are mismatched\"\n\n with tf.Graph().as_default():\n in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype='float32')\n in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype='float32')\n\n # reshape N H W C into N H*W*C\n in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])\n\n out = math_ops.mat_mul(in_data_reshape, in_filter)\n\n # if we have bias\n if bias_in_size:\n assert 
bias_in_size[0] == filter_in_sizes[1], \"bias and filter size are mismatched\"\n bias_array = [f * 1.0 for f in range(1, bias_in_size[0] + 1)]\n in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype='float32')\n out = nn_ops.bias_add(out, in_bias)\n\n data_array = np.reshape(data_array, tensor_in_sizes).astype('float32')\n compare_tflite_with_tvm(data_array, 'Placeholder:0', [in_data], [out])\n\n\ndef test_forward_fully_connected():\n \"\"\" Fully Connected \"\"\"\n _test_fully_connected([1, 1, 1, 150], [150, 100])\n _test_fully_connected([1, 1, 1, 150], [150, 100], [100])\n _test_fully_connected([5, 1, 1, 150], [150, 100])\n _test_fully_connected([5, 1, 1, 150], [150, 100], [100])\n\n\n#######################################################################\n# Custom Operators\n# ----------------\n\ndef test_detection_postprocess():\n tf_model_file = tf_testing.get_workload_official(\n \"http://download.tensorflow.org/models/object_detection/\"\n \"ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz\",\n \"ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03/tflite_graph.pb\"\n )\n converter = tf.lite.TFLiteConverter.from_frozen_graph(\n tf_model_file,\n input_arrays=[\"raw_outputs/box_encodings\", \"raw_outputs/class_predictions\"],\n output_arrays=[\n \"TFLite_Detection_PostProcess\",\n \"TFLite_Detection_PostProcess:1\",\n \"TFLite_Detection_PostProcess:2\",\n \"TFLite_Detection_PostProcess:3\"\n ],\n input_shapes={\n \"raw_outputs/box_encodings\": (1, 1917, 4),\n \"raw_outputs/class_predictions\": (1, 1917, 91),\n },\n )\n converter.allow_custom_ops = True\n converter.inference_type = tf.lite.constants.FLOAT\n tflite_model = converter.convert()\n np.random.seed(0)\n box_encodings = np.random.uniform(size=(1, 1917, 4)).astype('float32')\n class_predictions = np.random.uniform(size=(1, 1917, 91)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model, [box_encodings, class_predictions])\n tvm_output = run_tvm_graph(tflite_model, [box_encodings, class_predictions],\n [\"raw_outputs/box_encodings\", \"raw_outputs/class_predictions\"], num_output=4)\n\n # Check all output shapes are equal\n assert all([tvm_tensor.shape == tflite_tensor.shape \\\n for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)])\n\n # Check valid count is the same\n assert tvm_output[3] == tflite_output[3]\n valid_count = tvm_output[3][0]\n\n # For boxes that do not have any detections, TFLite puts random values. 
Therefore, we compare\n # tflite and tvm tensors for only valid boxes.\n for i in range(0, valid_count):\n # Check bounding box co-ords\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0][0][i]), np.squeeze(tflite_output[0][0][i]),\n rtol=1e-5, atol=1e-5)\n\n # Check the class\n # Stricter check to ensure class remains same\n np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]),\n np.squeeze(tflite_output[1][0][i]))\n\n # Check the score\n tvm.testing.assert_allclose(np.squeeze(tvm_output[2][0][i]), np.squeeze(tflite_output[2][0][i]),\n rtol=1e-5, atol=1e-5)\n\n\n#######################################################################\n# Mobilenet\n# ---------\n\ndef test_forward_mobilenet_v1():\n \"\"\"Test the Mobilenet V1 TF Lite model.\"\"\"\n # MobilenetV1\n tflite_model_file = tf_testing.get_workload_official(\n \"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\",\n \"mobilenet_v1_1.0_224.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\ndef test_forward_mobilenet_v2():\n \"\"\"Test the Mobilenet V2 TF Lite model.\"\"\"\n # MobilenetV2\n tflite_model_file = tf_testing.get_workload_official(\n \"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz\",\n \"mobilenet_v2_1.0_224.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# Mobilenet V3\n# ------------\n\ndef test_forward_mobilenet_v3():\n \"\"\"Test the Mobilenet V3 TF Lite model.\"\"\"\n # In MobilenetV3, some ops are not supported before tf 1.15 fbs schema\n if package_version.parse(tf.VERSION) < package_version.parse('1.15.0'):\n return\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz\",\n \"v3-large_224_1.0_float/v3-large_224_1.0_float.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 224, 224, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# Inception\n# ---------\n\ndef test_forward_inception_v3_net():\n \"\"\"Test the Inception V3 TF Lite model.\"\"\"\n # InceptionV3\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz\",\n \"inception_v3.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n 
tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\ndef test_forward_inception_v4_net():\n \"\"\"Test the Inception V4 TF Lite model.\"\"\"\n # InceptionV4\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz\",\n \"inception_v4.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 299, 299, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]),\n rtol=1e-5, atol=1e-5)\n\ndef test_forward_qnn_inception_v1_net():\n \"\"\"Test the Quantized TFLite Inception model.\"\"\"\n # InceptionV1\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz\",\n \"inception_v1_224_quant.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n\n # Test image. Checking the labels because the requantize implementation is different between\n # TFLite and Relay. This cause final output numbers to mismatch. So, testing accuracy via\n # labels. Also, giving a real image, instead of random inputs.\n data = get_real_image(224, 224)\n\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tflite_predictions = np.squeeze(tflite_output)\n tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm_predictions = np.squeeze(tvm_output)\n tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]\n tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)\n\ndef test_forward_qnn_mobilenet_v1_net():\n \"\"\"Test the Quantized TFLite Mobilenet V1 model.\"\"\"\n # MobilenetV1\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz\",\n \"mobilenet_v1_1.0_224_quant.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n\n # Test image. Checking the labels because the requantize implementation is different between\n # TFLite and Relay. This cause final output numbers to mismatch. So, testing accuracy via\n # labels. Also, giving a real image, instead of random inputs.\n data = get_real_image(224, 224)\n\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tflite_predictions = np.squeeze(tflite_output)\n tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm_predictions = np.squeeze(tvm_output)\n tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]\n tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)\n\ndef test_forward_qnn_mobilenet_v2_net():\n \"\"\"Test the Quantized TFLite Mobilenet V2 model.\"\"\"\n # MobilenetV2\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz\",\n \"mobilenet_v2_1.0_224_quant.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n\n # Test image. 
Checking the labels because the requantize implementation is different between\n # TFLite and Relay. This cause final output numbers to mismatch. So, testing accuracy via\n # labels. Also, giving a real image, instead of random inputs.\n data = get_real_image(224, 224)\n\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tflite_predictions = np.squeeze(tflite_output)\n tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm_predictions = np.squeeze(tvm_output)\n tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]\n tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)\n\n#######################################################################\n# Mobilenet V3 Quantized\n# ----------------------\n\ndef test_forward_qnn_mobilenet_v3_net():\n \"\"\"Test the Quantized TFLite Mobilenet V3 model.\"\"\"\n # In MobilenetV3, some ops are not supported before tf 1.15 fbs schema\n if package_version.parse(tf.VERSION) < package_version.parse('1.15.0'):\n pytest.skip(\"Unsupported in tflite < 1.15.0\")\n else:\n pytest.skip(\"This segfaults with tensorflow 1.15.2 and above\")\n\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_uint8.tgz\",\n \"v3-large_224_1.0_uint8/v3-large_224_1.0_uint8.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n\n # Test image. Checking the labels because the requantize implementation is different between\n # TFLite and Relay. This cause final output numbers to mismatch. So, testing accuracy via\n # labels. Also, giving a real image, instead of random inputs.\n data = get_real_image(224, 224)\n\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tflite_predictions = np.squeeze(tflite_output)\n tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input')\n tvm_predictions = np.squeeze(tvm_output)\n tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]\n tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)\n\n\n#######################################################################\n# Quantized SSD Mobilenet\n# -----------------------\n\ndef test_forward_qnn_coco_ssd_mobilenet_v1():\n \"\"\"Test the quantized Coco SSD Mobilenet V1 TF Lite model.\"\"\"\n pytest.skip(\"LLVM bug - getExtendedVectorNumElements - \"\n + \"https://discuss.tvm.ai/t/segfault-in-llvm/3567. The workaround is to use a \"\n + \"specific target, for example, llvm -mpcu=core-avx2\")\n\n tflite_model_file = tf_testing.get_workload_official(\n \"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip\",\n \"detect.tflite\")\n\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n\n data = get_real_image_object_detection(300, 300)\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'normalized_input_image_tensor', num_output=4)\n\n # Check all output shapes are equal\n assert all([tvm_tensor.shape == tflite_tensor.shape \\\n for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)])\n\n # Check valid count is the same\n assert tvm_output[3] == tflite_output[3]\n valid_count = tvm_output[3][0]\n\n # For boxes that do not have any detections, TFLite puts random values. 
Therefore, we compare\n    # tflite and tvm tensors for only valid boxes.\n    for i in range(0, valid_count):\n        # We compare the bounding boxes whose prediction score is above 60%. This is typical in end\n        # to end applications where a low prediction score is discarded. This is also needed because\n        # multiple low score bounding boxes can have same score and TFLite and TVM can have\n        # different orderings for same score bounding boxes. Another reason for minor differences in\n        # low score bounding boxes is the difference between TVM and TFLite for requantize operator.\n        if tvm_output[2][0][i] > 0.6:\n            # Check bounding box co-ords. The tolerances have to be adjusted, from 1e-5 to 1e-2,\n            # because of differences in the requantize operator between TFLite and TVM.\n            tvm.testing.assert_allclose(np.squeeze(tvm_output[0][0][i]),\n                                        np.squeeze(tflite_output[0][0][i]),\n                                        rtol=1e-2, atol=1e-2)\n\n            # Check the class\n            # Stricter check to ensure class remains same\n            np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]),\n                                    np.squeeze(tflite_output[1][0][i]))\n\n            # Check the score\n            tvm.testing.assert_allclose(np.squeeze(tvm_output[2][0][i]),\n                                        np.squeeze(tflite_output[2][0][i]),\n                                        rtol=1e-5, atol=1e-5)\n\n\n#######################################################################\n# SSD Mobilenet\n# -------------\n\ndef test_forward_coco_ssd_mobilenet_v1():\n    \"\"\"Test the FP32 Coco SSD Mobilenet V1 TF Lite model.\"\"\"\n    tflite_model_file = tf_testing.get_workload_official(\n        \"https://raw.githubusercontent.com/dmlc/web-data/master/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz\",\n        \"ssd_mobilenet_v1_coco_2018_01_28.tflite\")\n\n    with open(tflite_model_file, \"rb\") as f:\n        tflite_model_buf = f.read()\n\n    np.random.seed(0)\n    data = np.random.uniform(size=(1, 300, 300, 3)).astype('float32')\n    tflite_output = run_tflite_graph(tflite_model_buf, data)\n    tvm_output = run_tvm_graph(tflite_model_buf, data, 'normalized_input_image_tensor', num_output=4)\n\n    # Check all output shapes are equal\n    assert all([tvm_tensor.shape == tflite_tensor.shape \\\n                for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)])\n\n    # Check valid count is the same\n    assert tvm_output[3] == tflite_output[3]\n    valid_count = tvm_output[3][0]\n\n    # For boxes that do not have any detections, TFLite puts random values. 
Therefore, we compare\n # tflite and tvm tensors for only valid boxes.\n for i in range(0, valid_count):\n # Check bounding box co-ords\n tvm.testing.assert_allclose(np.squeeze(tvm_output[0][0][i]), np.squeeze(tflite_output[0][0][i]),\n rtol=1e-5, atol=1e-5)\n # Check the class\n np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]))\n\n # Check the score\n tvm.testing.assert_allclose(np.squeeze(tvm_output[2][0][i]), np.squeeze(tflite_output[2][0][i]),\n rtol=1e-5, atol=1e-5)\n\n#######################################################################\n# MediaPipe\n# -------------\ndef test_forward_mediapipe_hand_landmark():\n \"\"\"Test MediaPipe 2D hand landmark TF Lite model.\"\"\"\n # MediaPipe 2D hand landmark TF\n tflite_model_file = download_testdata(\n \"https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite\",\n \"hand_landmark.tflite\")\n with open(tflite_model_file, \"rb\") as f:\n tflite_model_buf = f.read()\n data = np.random.uniform(size=(1, 256, 256, 3)).astype('float32')\n tflite_output = run_tflite_graph(tflite_model_buf, data)\n tvm_output = run_tvm_graph(tflite_model_buf, data, 'input_1', num_output=2)\n for i in range(2):\n tvm.testing.assert_allclose(np.squeeze(tvm_output[i]), np.squeeze(tflite_output[i]),\n rtol=1e-5, atol=1e-5)\n\n\n#######################################################################\n# Main\n# ----\nif __name__ == '__main__':\n # BatchToSpaceND\n test_forward_batch_to_space_nd()\n\n # SpaceToBatchND\n test_forward_space_to_batch_nd()\n\n # Split\n test_forward_split()\n\n # Transpose\n test_forward_transpose()\n\n # Cast\n test_forward_cast()\n\n # BatchMatMul\n test_forward_batch_matmul()\n\n # Tile\n test_forward_tile()\n\n # Query\n test_forward_shape()\n\n # Transforms\n test_forward_concatenation()\n test_forward_pad()\n test_forward_pack()\n test_forward_unpack()\n test_forward_reshape()\n test_all_resize()\n test_forward_range()\n test_forward_squeeze()\n test_forward_slice()\n test_forward_topk()\n test_forward_gather()\n test_forward_gather_nd()\n test_forward_stridedslice()\n test_forward_depthtospace()\n test_forward_spacetodepth()\n test_forward_sparse_to_dense()\n test_forward_select()\n test_forward_quantize_dequantize()\n\n # NN\n test_forward_convolution()\n test_forward_transpose_conv()\n test_forward_logistic()\n test_forward_pooling()\n test_forward_l2_pool2d()\n test_forward_softmax()\n test_forward_tanh()\n test_forward_relu()\n test_forward_prelu()\n test_forward_fully_connected()\n test_forward_l2_normalization()\n test_forward_local_response_normalization()\n\n # Elemwise\n test_all_elemwise()\n test_forward_add_n()\n\n # Unary elemwise\n test_all_unary_elemwise()\n # Zeros Like\n test_forward_zeros_like()\n\n # Fill\n test_forward_fill()\n\n # Reduce\n test_all_reduce()\n\n # Logical\n test_all_logical()\n\n # Detection_PostProcess\n test_detection_postprocess()\n\n # End to End\n test_forward_mobilenet_v1()\n test_forward_mobilenet_v2()\n test_forward_mobilenet_v3()\n test_forward_inception_v3_net()\n test_forward_inception_v4_net()\n test_forward_coco_ssd_mobilenet_v1()\n test_forward_mediapipe_hand_landmark()\n\n # End to End quantized\n test_forward_qnn_inception_v1_net()\n test_forward_qnn_mobilenet_v1_net()\n test_forward_qnn_mobilenet_v2_net()\n #This also fails with a segmentation fault in my run\n #with Tflite 1.15.2\n test_forward_qnn_mobilenet_v3_net()\n test_forward_qnn_coco_ssd_mobilenet_v1()\n"
] | [
[
"tensorflow.python.ops.array_ops.space_to_batch_nd",
"tensorflow.python.ops.array_ops.strided_slice",
"numpy.asarray",
"tensorflow.python.ops.array_ops.split",
"numpy.squeeze",
"tensorflow.python.ops.nn_ops.conv2d_transpose",
"tensorflow.python.ops.nn_ops.pool",
"tensorflow.lite.TFLiteConverter.from_session",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.tanh",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.nn_ops.local_response_normalization",
"tensorflow.where",
"tensorflow.python.ops.nn_ops.softmax",
"tensorflow.python.ops.array_ops.depth_to_space",
"tensorflow.add_n",
"tensorflow.contrib.lite.TFLiteConverter.from_keras_model",
"numpy.random.randint",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.Graph",
"tensorflow.sparse_to_dense",
"numpy.reshape",
"numpy.arange",
"numpy.full",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.reset_default_graph",
"tensorflow.contrib.lite.Interpreter",
"tensorflow.add",
"tensorflow.Session",
"numpy.float32",
"tensorflow.quantization.fake_quant_with_min_max_args",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.math_ops.logical_or",
"tensorflow.python.ops.nn_ops.bias_add",
"tensorflow.python.ops.array_ops.pack",
"tensorflow.square",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.gen_array_ops.unpack",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.gen_array_ops.zeros_like",
"tensorflow.fill",
"tensorflow.gather_nd",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.nn_ops.depthwise_conv2d_native",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.shape",
"numpy.random.choice",
"tensorflow.placeholder",
"tensorflow.python.ops.array_ops.batch_to_space_nd",
"tensorflow.python.ops.array_ops.space_to_depth",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.random.rand",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.ops.nn_impl.l2_normalize",
"tensorflow.python.ops.math_ops.sigmoid",
"tensorflow.keras.layers.Activation",
"tensorflow.python.ops.array_ops.concat",
"numpy.random.seed",
"tensorflow.lite.TFLiteConverter.from_frozen_graph",
"tensorflow.range",
"tensorflow.python.ops.nn_ops.relu6",
"numpy.nditer",
"numpy.int32",
"tensorflow.python.ops.math_ops.mat_mul",
"tensorflow.python.ops.array_ops.reshape",
"numpy.random.normal",
"numpy.prod",
"tensorflow.python.ops.nn_ops.relu",
"numpy.random.uniform",
"tensorflow.python.ops.nn_ops.top_k",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
apoorv2904/fairseq | [
"d60eb45f09721d04e14791eb184770234324e3d8"
] | [
"fairseq/tasks/fairseq_task.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport os\nimport warnings\nfrom argparse import Namespace\n\nimport torch\nfrom fairseq import metrics, search, tokenizer, utils\nfrom fairseq.data import Dictionary, FairseqDataset, data_utils, iterators\nfrom fairseq.dataclass.utils import gen_parser_from_dataclass\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass FairseqTask(object):\n    \"\"\"\n    Tasks store dictionaries and provide helpers for loading/iterating over\n    Datasets, initializing the Model/Criterion and calculating the loss.\n    \"\"\"\n\n    @classmethod\n    def add_args(cls, parser):\n        \"\"\"Add task-specific arguments to the parser.\"\"\"\n        dc = getattr(cls, \"__dataclass\", None)\n        if dc is not None:\n            gen_parser_from_dataclass(parser, dc())\n\n    @staticmethod\n    def logging_outputs_can_be_summed(criterion) -> bool:\n        \"\"\"\n        Whether the logging outputs returned by `train_step` and `valid_step` can\n        be summed across workers prior to calling `aggregate_logging_outputs`.\n        Setting this to True will improve distributed training speed.\n        \"\"\"\n        return criterion.logging_outputs_can_be_summed()\n\n    def __init__(self, args):\n        self.args = args\n        self.datasets = {}\n        self.dataset_to_epoch_iter = {}\n\n    @classmethod\n    def load_dictionary(cls, filename):\n        \"\"\"Load the dictionary from the filename\n\n        Args:\n            filename (str): the filename\n        \"\"\"\n        return Dictionary.load(filename)\n\n    @classmethod\n    def build_dictionary(\n        cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8\n    ):\n        \"\"\"Build the dictionary\n\n        Args:\n            filenames (list): list of filenames\n            workers (int): number of concurrent workers\n            threshold (int): defines the minimum word count\n            nwords (int): defines the total number of words in the final dictionary,\n                including special symbols\n            padding_factor (int): can be used to pad the dictionary size to be a\n                multiple of 8, which is important on some hardware (e.g., Nvidia\n                Tensor Cores).\n        \"\"\"\n        d = Dictionary()\n        for filename in filenames:\n            Dictionary.add_file_to_dictionary(\n                filename, d, tokenizer.tokenize_line, workers\n            )\n        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)\n        return d\n\n    @classmethod\n    def setup_task(cls, args, **kwargs):\n        \"\"\"Setup the task (e.g., load dictionaries).\n\n        Args:\n            args (argparse.Namespace): parsed command-line arguments\n        \"\"\"\n        return cls(args, **kwargs)\n\n    def has_sharded_data(self, split):\n        return os.pathsep in getattr(self.args, \"data\", \"\")\n\n    def load_dataset(self, split, combine=False, **kwargs):\n        \"\"\"Load a given dataset split.\n\n        Args:\n            split (str): name of the split (e.g., train, valid, test)\n        \"\"\"\n        raise NotImplementedError\n\n    def dataset(self, split):\n        \"\"\"\n        Return a loaded dataset split.\n\n        Args:\n            split (str): name of the split (e.g., train, valid, test)\n\n        Returns:\n            a :class:`~fairseq.data.FairseqDataset` corresponding to *split*\n        \"\"\"\n        from fairseq.data import FairseqDataset\n\n        if split not in self.datasets:\n            raise KeyError(\"Dataset not loaded: \" + split)\n        if not isinstance(self.datasets[split], FairseqDataset):\n            raise TypeError(\"Datasets are expected to be of type FairseqDataset\")\n        return self.datasets[split]\n\n    def filter_indices_by_size(\n        self, indices, dataset, max_positions=None, ignore_invalid_inputs=False\n    ):\n        \"\"\"\n        Filter examples that are too large\n\n        Args:\n            
indices (np.array): original array of sample indices\n dataset (~fairseq.data.FairseqDataset): dataset to batch\n max_positions (optional): max sentence length supported by the\n model (default: None).\n ignore_invalid_inputs (bool, optional): don't raise Exception for\n sentences that are too long (default: False).\n Returns:\n np.array: array of filtered sample indices\n \"\"\"\n indices, ignored = dataset.filter_indices_by_size(indices, max_positions)\n if len(ignored) > 0:\n if not ignore_invalid_inputs:\n raise Exception(\n (\n \"Size of sample #{} is invalid (={}) since max_positions={}, \"\n \"skip this example with --skip-invalid-size-inputs-valid-test\"\n ).format(ignored[0], dataset.size(ignored[0]), max_positions)\n )\n logger.warning(\n (\n \"{} samples have invalid sizes and will be skipped, \"\n \"max_positions={}, first few sample ids={}\"\n ).format(len(ignored), max_positions, ignored[:10])\n )\n return indices\n\n def can_reuse_epoch_itr(self, dataset):\n # We can reuse the epoch iterator across epochs as long as the dataset\n # hasn't disabled it. We default to ``False`` here, although in practice\n # this will be ``True`` for most datasets that inherit from\n # ``FairseqDataset`` due to the base implementation there.\n return getattr(dataset, \"can_reuse_epoch_itr_across_epochs\", False)\n\n def get_batch_iterator(\n self,\n dataset,\n max_tokens=None,\n max_sentences=None,\n max_positions=None,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=1,\n data_buffer_size=0,\n disable_iterator_cache=False,\n ):\n \"\"\"\n Get an iterator that yields batches of data from the given dataset.\n\n Args:\n dataset (~fairseq.data.FairseqDataset): dataset to batch\n max_tokens (int, optional): max number of tokens in each batch\n (default: None).\n max_sentences (int, optional): max number of sentences in each\n batch (default: None).\n max_positions (optional): max sentence length supported by the\n model (default: None).\n ignore_invalid_inputs (bool, optional): don't raise Exception for\n sentences that are too long (default: False).\n required_batch_size_multiple (int, optional): require batch size to\n be a multiple of N (default: 1).\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 1).\n data_buffer_size (int, optional): number of batches to\n preload (default: 0).\n disable_iterator_cache (bool, optional): don't cache the\n EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)\n (default: False).\n Returns:\n ~fairseq.iterators.EpochBatchIterator: a batched iterator over the\n given dataset split\n \"\"\"\n can_reuse_epoch_itr = not disable_iterator_cache and self.can_reuse_epoch_itr(\n dataset\n )\n if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:\n logger.debug(\"reusing EpochBatchIterator for epoch {}\".format(epoch))\n return self.dataset_to_epoch_iter[dataset]\n\n assert isinstance(dataset, FairseqDataset)\n\n # initialize the dataset with the correct starting epoch\n dataset.set_epoch(epoch)\n\n # get indices ordered by example size\n with data_utils.numpy_seed(seed):\n indices = dataset.ordered_indices()\n\n # filter examples that are too large\n if max_positions is not None:\n indices = self.filter_indices_by_size(\n indices, dataset, max_positions, ignore_invalid_inputs\n )\n\n # create mini-batches with given size constraints\n batch_sampler = dataset.batch_by_size(\n indices,\n max_tokens=max_tokens,\n max_sentences=max_sentences,\n required_batch_size_multiple=required_batch_size_multiple,\n )\n\n # return a reusable, sharded iterator\n epoch_iter = iterators.EpochBatchIterator(\n dataset=dataset,\n collate_fn=dataset.collater,\n batch_sampler=batch_sampler,\n seed=seed,\n num_shards=num_shards,\n shard_id=shard_id,\n num_workers=num_workers,\n epoch=epoch,\n buffer_size=data_buffer_size,\n )\n\n if can_reuse_epoch_itr:\n self.dataset_to_epoch_iter[dataset] = epoch_iter\n\n return epoch_iter\n\n def build_model(self, args):\n \"\"\"\n Build the :class:`~fairseq.models.BaseFairseqModel` instance for this\n task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.models.BaseFairseqModel` instance\n \"\"\"\n from fairseq import models, quantization_utils\n\n model = models.build_model(args, self)\n if getattr(args, \"tpu\", False):\n model.prepare_for_tpu_()\n model = quantization_utils.quantize_model_scalar(model, args)\n return model\n\n def build_criterion(self, args):\n \"\"\"\n Build the :class:`~fairseq.criterions.FairseqCriterion` instance for\n this task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.criterions.FairseqCriterion` instance\n \"\"\"\n from fairseq import criterions\n\n return criterions.build_criterion(args, self)\n\n def build_generator(\n self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None\n ):\n if getattr(args, \"score_reference\", False):\n from fairseq.sequence_scorer import SequenceScorer\n\n return SequenceScorer(\n self.target_dictionary,\n compute_alignment=getattr(args, \"print_alignment\", False),\n )\n\n from fairseq.sequence_generator import (\n SequenceGenerator,\n SequenceGeneratorWithAlignment,\n )\n\n # Choose search strategy. 
Defaults to Beam Search.\n sampling = getattr(args, \"sampling\", False)\n sampling_topk = getattr(args, \"sampling_topk\", -1)\n sampling_topp = getattr(args, \"sampling_topp\", -1.0)\n diverse_beam_groups = getattr(args, \"diverse_beam_groups\", -1)\n diverse_beam_strength = getattr(args, \"diverse_beam_strength\", 0.5)\n match_source_len = getattr(args, \"match_source_len\", False)\n diversity_rate = getattr(args, \"diversity_rate\", -1)\n constrained = getattr(args, \"constraints\", False)\n prefix_allowed_tokens_fn = getattr(args, \"prefix_allowed_tokens_fn\", None)\n if (\n sum(\n int(cond)\n for cond in [\n sampling,\n diverse_beam_groups > 0,\n match_source_len,\n diversity_rate > 0,\n ]\n )\n > 1\n ):\n raise ValueError(\"Provided Search parameters are mutually exclusive.\")\n assert sampling_topk < 0 or sampling, \"--sampling-topk requires --sampling\"\n assert sampling_topp < 0 or sampling, \"--sampling-topp requires --sampling\"\n\n if sampling:\n search_strategy = search.Sampling(\n self.target_dictionary, sampling_topk, sampling_topp\n )\n elif diverse_beam_groups > 0:\n search_strategy = search.DiverseBeamSearch(\n self.target_dictionary, diverse_beam_groups, diverse_beam_strength\n )\n elif match_source_len:\n # this is useful for tagging applications where the output\n # length should match the input length, so we hardcode the\n # length constraints for simplicity\n search_strategy = search.LengthConstrainedBeamSearch(\n self.target_dictionary,\n min_len_a=1,\n min_len_b=0,\n max_len_a=1,\n max_len_b=0,\n )\n elif diversity_rate > -1:\n search_strategy = search.DiverseSiblingsSearch(\n self.target_dictionary, diversity_rate\n )\n elif constrained:\n search_strategy = search.LexicallyConstrainedBeamSearch(\n self.target_dictionary, args.constraints\n )\n elif prefix_allowed_tokens_fn:\n search_strategy = search.PrefixConstrainedBeamSearch(\n self.target_dictionary, prefix_allowed_tokens_fn\n )\n else:\n search_strategy = search.BeamSearch(self.target_dictionary)\n\n if seq_gen_cls is None:\n if getattr(args, \"print_alignment\", False):\n seq_gen_cls = SequenceGeneratorWithAlignment\n else:\n seq_gen_cls = SequenceGenerator\n extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}\n return seq_gen_cls(\n models,\n self.target_dictionary,\n beam_size=getattr(args, \"beam\", 5),\n max_len_a=getattr(args, \"max_len_a\", 0),\n max_len_b=getattr(args, \"max_len_b\", 200),\n min_len=getattr(args, \"min_len\", 1),\n normalize_scores=(not getattr(args, \"unnormalized\", False)),\n len_penalty=getattr(args, \"lenpen\", 1),\n unk_penalty=getattr(args, \"unkpen\", 0),\n temperature=getattr(args, \"temperature\", 1.0),\n match_source_len=getattr(args, \"match_source_len\", False),\n no_repeat_ngram_size=getattr(args, \"no_repeat_ngram_size\", 0),\n search_strategy=search_strategy,\n **extra_gen_cls_kwargs,\n )\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False\n ):\n \"\"\"\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. 
The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n update_num (int): the current update\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n \"\"\"\n model.train()\n model.set_num_updates(update_num)\n with torch.autograd.profiler.record_function(\"forward\"):\n loss, sample_size, logging_output = criterion(model, sample)\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n return loss, sample_size, logging_output\n\n def valid_step(self, sample, model, criterion):\n model.eval()\n with torch.no_grad():\n loss, sample_size, logging_output = criterion(model, sample)\n return loss, sample_size, logging_output\n\n def inference_step(\n self, generator, models, sample, prefix_tokens=None, constraints=None\n ):\n with torch.no_grad():\n return generator.generate(\n models, sample, prefix_tokens=prefix_tokens, constraints=constraints\n )\n\n def begin_epoch(self, epoch, model):\n \"\"\"Hook function called before the start of each epoch.\"\"\"\n pass\n\n def begin_valid_epoch(self, epoch, model):\n \"\"\"Hook function called before the start of each validation epoch.\"\"\"\n pass\n\n def aggregate_logging_outputs(self, logging_outputs, criterion):\n \"\"\"[deprecated] Aggregate logging outputs from data parallel training.\"\"\"\n utils.deprecation_warning(\n \"The aggregate_logging_outputs API is deprecated. \"\n \"Please use the reduce_metrics API instead.\"\n )\n with metrics.aggregate() as agg:\n self.reduce_metrics(logging_outputs, criterion)\n return agg.get_smoothed_values()\n\n def reduce_metrics(self, logging_outputs, criterion):\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\n # backward compatibility for tasks that override aggregate_logging_outputs\n base_func = FairseqTask.aggregate_logging_outputs\n self_func = getattr(self, \"aggregate_logging_outputs\").__func__\n if self_func is not base_func:\n utils.deprecation_warning(\n \"Tasks should implement the reduce_metrics API. 
\"\n \"Falling back to deprecated aggregate_logging_outputs API.\"\n )\n agg_logging_outputs = self.aggregate_logging_outputs(\n logging_outputs, criterion\n )\n for k, v in agg_logging_outputs.items():\n metrics.log_scalar(k, v)\n return\n\n if not any(\"ntokens\" in log for log in logging_outputs):\n warnings.warn(\n \"ntokens not found in Criterion logging outputs, cannot log wpb or wps\"\n )\n else:\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\n metrics.log_scalar(\"wpb\", ntokens, priority=180, round=1)\n metrics.log_speed(\"wps\", ntokens, priority=90, round=1)\n\n if not any(\"nsentences\" in log for log in logging_outputs):\n warnings.warn(\n \"nsentences not found in Criterion logging outputs, cannot log bsz\"\n )\n else:\n nsentences = sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n metrics.log_scalar(\"bsz\", nsentences, priority=190, round=1)\n\n criterion.__class__.reduce_metrics(logging_outputs)\n\n def max_positions(self):\n \"\"\"Return the max input length allowed by the task.\"\"\"\n return None\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary` (if applicable\n for this task).\"\"\"\n raise NotImplementedError\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary` (if applicable\n for this task).\"\"\"\n raise NotImplementedError\n\n\nclass LegacyFairseqTask(FairseqTask):\n def __init__(self, args: Namespace):\n self.args = args\n self.datasets = {}\n self.dataset_to_epoch_iter = {}\n\n @classmethod\n def setup_task(cls, args: Namespace, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n return cls(args, **kwargs)\n\n def has_sharded_data(self, split):\n return os.pathsep in getattr(self.args, \"data\", \"\")\n\n def build_model(self, args: Namespace):\n \"\"\"\n Build the :class:`~fairseq.models.BaseFairseqModel` instance for this\n task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.models.BaseFairseqModel` instance\n \"\"\"\n from fairseq import models, quantization_utils\n\n model = models.build_model(args, self)\n if getattr(args, \"tpu\", False):\n model.prepare_for_tpu_()\n model = quantization_utils.quantize_model_scalar(model, args)\n return model\n\n def build_criterion(self, args: Namespace):\n \"\"\"\n Build the :class:`~fairseq.criterions.FairseqCriterion` instance for\n this task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.criterions.FairseqCriterion` instance\n \"\"\"\n from fairseq import criterions\n\n return criterions.build_criterion(args, self)\n"
] | [
[
"torch.autograd.profiler.record_function",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
algotradingsoc/generative_modelling | [
"c08fc241a82e959d29e3fc8e9566b06e016f6131"
] | [
"genmodels/gpvae/gp_vae.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom tensorflow_probability import distributions as tfd\nfrom collections import defaultdict\nimport time\nfrom sklearn import preprocessing\n\nfrom genmodels.gpvae.kernels import *\nfrom genmodels.gpvae.encoder import *\nfrom genmodels.gpvae.decoder import *\n\n\"\"\"\n GP-VAE model\n\"\"\"\n\n# Required parameters when instantiating\nrequired_params = [\"latent_dim\", \"data_dim\", \"time_length\"]\n\n\nclass GP_VAE(tf.keras.Model):\n def __init__(self, params):\n super(GP_VAE, self).__init__()\n\n #assert required_params in list(params.keys()), \"Need to provide required parameters: \" + \", \".join(np.setdiff(required_params, list(params.keys())))\n\n if \"seed\" in params.keys():\n np.random.seed(params[\"seed\"])\n tf.random.set_seed(params[\"seed\"])\n \n self.params = params\n self.initialise(latent_dim=params['latent_dim'], \n data_dim=params['data_dim'], \n time_length=params['time_length'], \n encoder_sizes=params['encoder_sizes'],\n decoder_sizes=params['decoder_sizes'],\n kernel=params['kernel'], \n sigma=params['sigma'],\n length_scale=params['length_scale'],\n kernel_scales=params['kernel_scales'],\n beta=params['beta'],\n M=params['M'],\n K=params['K'],\n window_size=params['window_size'],\n paper_version=params[\"paper_version\"])\n\n\n def initialise(self, latent_dim, data_dim, time_length,\n encoder_sizes=(64, 64),\n decoder_sizes=(64, 64),\n beta=1.0, M=10, K=1, \n kernel=\"cauchy\", sigma=1., \n length_scale=1.0, kernel_scales=1, \n window_size=3, paper_version=False):\n \"\"\" Proposed GP-VAE model with Gaussian Process prior\n :param latent_dim: latent space dimensionality\n :param data_dim: original data dimensionality\n :param time_length: time series duration\n :param encoder_sizes: layer sizes for the encoder network\n :param decoder_sizes: layer sizes for the decoder network\n :param beta: tradeoff coefficient between reconstruction and KL terms in ELBO\n :param M: number of Monte Carlo samples for ELBO estimation\n :param K: number of importance weights for IWAE model (see: https://arxiv.org/abs/1509.00519)\n :param kernel: Gaussian Process kernel [\"cauchy\", \"diffusion\", \"rbf\", \"matern\"]\n :param sigma: scale parameter for a kernel function\n :param length_scale: length scale parameter for a kernel function\n :param kernel_scales: number of different length scales over latent space dimensions\n :param paper_version: bool indicating whether to use the paper's verion of the decoder\n paper only uses a network for the mean and holds variance at 1 for all time series\n \"\"\"\n self.training = False # Initialise to training mode False\n\n self.latent_dim = latent_dim\n self.data_dim = data_dim\n self.time_length = time_length\n\n self.encoder = JointEncoder(latent_dim, encoder_sizes, window_size=window_size)\n self.decoder = GaussianDecoder(data_dim, decoder_sizes, paper_version=paper_version)\n\n self.beta = beta\n self.K = K\n self.M = M\n self.kernel = kernel\n self.sigma = sigma\n self.length_scale = length_scale\n self.kernel_scales = kernel_scales\n\n self.training = False\n self.augment_at_train_time = False\n # If using built in augmentation, % range of points to drop at train time\n self.drop_pct_range = [0.3, 0.8] \n\n # KL components\n self.pz_scale_inv = None\n self.pz_scale_log_abs_determinant = None\n self.prior = None\n\n\n def __call__(self, x, with_var=False):\n scalers = []\n for i in range(len(x)):\n scaler = preprocessing.StandardScaler()\n x[i] = scaler.fit_transform(x[i])\n 
scalers.append(scaler)\n        \n        mean = self.decode(self.encode(x).mean()).mean()\n        if with_var:\n            variance = self.decode(self.encode(x).mean()).variance()\n            return mean, variance\n\n        # Map each reconstructed sample back to its original scale\n        for i, scaler in enumerate(scalers):\n            mean[i] = scaler.inverse_transform(mean[i])\n\n        return mean\n\n\n    # Fetch variance\n    def get_variance(self, x):\n        return self.decode(self.encode(x).mean()).variance()\n\n\n    # Draw sample from imputed posterior\n    def sample(self, x, num_samples=1):\n        return tf.convert_to_tensor([self.decode(self.encode(x).mean()).sample() for _ in range(num_samples)])\n\n\n    def decode(self, z):\n        num_dim = len(z.shape)\n        assert num_dim > 2\n        perm = list(range(num_dim - 2)) + [num_dim - 1, num_dim - 2]\n        return self.decoder(tf.transpose(z, perm=perm))\n\n\n    def encode(self, x):\n        x = tf.identity(x)  # cast x as Tensor just in case\n        return self.encoder(x)\n    \n\n    def compute_loss(self, x, m_mask=None, return_parts=False, initialising=False):\n        return self._compute_loss(x, m_mask=m_mask, return_parts=return_parts, initialising=initialising)\n\n\n    def _compute_loss(self, x, m_mask=None, return_parts=False, initialising=False):\n        \"\"\"\n        Do not provide m_mask for standard time series imputation tasks!!!\n        \"\"\"\n        assert len(x.shape) == 3, \"Input should have shape: [batch_size, time_length, data_dim]\"\n\n        # If in training and augmenting at train time\n        if self.training and self.augment_at_train_time and not initialising and m_mask is None:\n            x_new = []\n            for xi in x:\n                mask = []\n                # Create a drop-mask by randomly sampling a drop % for each feature\n                for _ in range(xi.shape[1]):\n                    drop_pct = np.random.uniform(self.drop_pct_range[0], self.drop_pct_range[1])\n                    m = np.random.choice([0, 1], size=xi.shape[0], p=[drop_pct, 1-drop_pct])  # Generate random mask\n                    mask.append(m)\n                mask = np.array(mask).T\n                x_new.append(np.multiply(xi, mask))\n            x = np.array(x_new, dtype=np.float32)\n\n        scalers = []\n        for i in range(len(x)):\n            scaler = preprocessing.StandardScaler()\n            x[i] = scaler.fit_transform(x[i])\n            scalers.append(scaler)\n\n        x = tf.identity(x)  # in case x is not a Tensor already...\n        x = tf.tile(x, [self.M * self.K, 1, 1])  # shape=(M*K*BS, TL, D)\n\n        if m_mask is not None:\n            m_mask = tf.identity(m_mask)  # in case m_mask is not a Tensor already...\n            m_mask = tf.tile(m_mask, [self.M * self.K, 1, 1])  # shape=(M*K*BS, TL, D)\n            m_mask = tf.cast(m_mask, tf.bool)\n\n        pz = self._get_prior()  # p(z)\n        qz_x = self.encode(x)  # q(z|x) ie. 
q(z)\n z = qz_x.sample() \n px_z = self.decode(z) # p(x|z)\n\n nll = -px_z.log_prob(x) # shape=(M*K*BS, TL, D), implement -E[log p(x|z)]\n nll = tf.where(tf.math.is_finite(nll), nll, tf.zeros_like(nll))\n if m_mask is not None:\n nll = tf.where(m_mask, tf.zeros_like(nll), nll)\n nll = tf.reduce_sum(nll, [1, 2]) # shape=(M*K*BS)\n\n if self.K > 1:\n kl = qz_x.log_prob(z) - pz.log_prob(z) # shape=(M*K*BS, TL or d)\n kl = tf.where(tf.math.is_finite(kl), kl, tf.zeros_like(kl))\n kl = tf.reduce_sum(kl, 1) # shape=(M*K*BS)\n\n weights = -nll - kl # shape=(M*K*BS)\n weights = tf.reshape(weights, [self.M, self.K, -1]) # shape=(M, K, BS)\n\n elbo = self.reduce_logmeanexp(weights, axis=1) # shape=(M, 1, BS)\n elbo = tf.reduce_mean(elbo) # scalar\n else: # Do not use importance sampling\n # if K==1, compute KL analytically\n kl = self.kl_divergence(qz_x, pz) # shape=(M*K*BS, TL or d)\n kl = tf.where(tf.math.is_finite(kl), kl, tf.zeros_like(kl))\n kl = tf.reduce_sum(kl, 1) # shape=(M*K*BS)\n\n elbo = -nll - self.beta * kl # shape=(M*K*BS) K=1, implement ELBO = E[log p(x|z)] - KL(q || p)\n elbo = tf.reduce_mean(elbo) # scalar\n\n if return_parts:\n nll = tf.reduce_mean(nll) # scalar\n kl = tf.reduce_mean(kl) # scalar\n return -elbo, nll, kl\n else:\n return -elbo\n\n\n def compute_nll(self, x, y=None, m_mask=None):\n # Used only for evaluation\n assert len(x.shape) == 3, \"Input should have shape: [batch_size, time_length, data_dim]\"\n if y is None: y = x\n\n z_sample = self.encode(x).sample()\n x_hat_dist = self.decode(z_sample)\n nll = -x_hat_dist.log_prob(y) # shape=(BS, TL, D)\n nll = tf.where(tf.math.is_finite(nll), nll, tf.zeros_like(nll))\n if m_mask is not None:\n m_mask = tf.cast(m_mask, tf.bool)\n nll = tf.where(m_mask, nll, tf.zeros_like(nll)) # !!! 
inverse mask, set zeros for observed\n return tf.reduce_sum(nll)\n\n\n def compute_mse(self, x, y=None, m_mask=None, binary=False):\n # Used only during evaluation\n assert len(x.shape) == 3, \"Input should have shape: [batch_size, time_length, data_dim]\"\n if y is None: y = x\n\n z_mean = self.encode(x).mean()\n x_hat_mean = self.decode(z_mean).mean() # shape=(BS, TL, D)\n if binary:\n x_hat_mean = tf.round(x_hat_mean)\n mse = tf.math.squared_difference(x_hat_mean, y)\n if m_mask is not None:\n m_mask = tf.cast(m_mask, tf.bool)\n mse = tf.where(m_mask, mse, tf.zeros_like(mse)) # inverse mask, set zeros for observed values\n return tf.reduce_sum(mse)\n\n\n def _get_prior(self):\n if self.prior is None:\n # Compute kernel matrices for each latent dimension\n kernel_matrices = []\n for i in range(self.kernel_scales):\n if self.kernel == \"rbf\":\n kernel_matrices.append(rbf_kernel(self.time_length, self.length_scale / 2**i))\n elif self.kernel == \"matern\":\n kernel_matrices.append(matern_kernel(self.time_length, self.length_scale / 2**i))\n elif self.kernel == \"cauchy\":\n kernel_matrices.append(cauchy_kernel(self.time_length, self.sigma, self.length_scale / 2**i))\n\n # Combine kernel matrices for each latent dimension\n tiled_matrices = []\n total = 0\n for i in range(self.kernel_scales):\n if i == self.kernel_scales-1:\n multiplier = self.latent_dim - total\n else:\n multiplier = int(np.ceil(self.latent_dim / self.kernel_scales))\n total += multiplier\n tiled_matrices.append(tf.tile(tf.expand_dims(kernel_matrices[i], 0), [multiplier, 1, 1]))\n kernel_matrix_tiled = np.concatenate(tiled_matrices)\n assert len(kernel_matrix_tiled) == self.latent_dim\n self.prior = tfd.MultivariateNormalTriL(loc=tf.zeros([self.latent_dim, self.time_length]), scale_tril=tf.linalg.cholesky(kernel_matrix_tiled))\n # Below is depracated\n #self.prior = tfd.MultivariateNormalFullCovariance(\n #loc=tf.zeros([self.latent_dim, self.time_length]),\n #covariance_matrix=kernel_matrix_tiled)\n return self.prior\n \n\n def get_trainable_vars(self):\n self.compute_loss(tf.random.normal(shape=(1, self.time_length, self.data_dim), dtype=tf.float32),\n tf.zeros(shape=(1, self.time_length, self.data_dim), dtype=tf.float32), initialising=True)\n return self.trainable_variables\n\n\n def kl_divergence(self, a, b):\n \"\"\" Batched KL divergence `KL(a || b)` for multivariate Normals.\n See https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/distributions/mvn_linear_operator.py\n It's used instead of default KL class in order to exploit precomputed components for efficiency\n \"\"\"\n\n squared_frobenius_norm = lambda x: tf.reduce_sum(tf.square(x), axis=[-2, -1])\n\n # Helper to identify if `LinearOperator` has only a diagonal component\n is_diagonal = lambda x: (isinstance(x, tf.linalg.LinearOperatorIdentity) or\n isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or\n isinstance(x, tf.linalg.LinearOperatorDiag))\n\n if is_diagonal(a.scale) and is_diagonal(b.scale):\n # Using `stddev` because it handles expansion of Identity cases.\n b_inv_a = (a.stddev() / b.stddev())[..., tf.newaxis]\n else:\n if self.pz_scale_inv is None:\n self.pz_scale_inv = tf.linalg.inv(b.scale.to_dense())\n self.pz_scale_inv = tf.where(tf.math.is_finite(self.pz_scale_inv), self.pz_scale_inv, tf.zeros_like(self.pz_scale_inv))\n\n if self.pz_scale_log_abs_determinant is None:\n self.pz_scale_log_abs_determinant = b.scale.log_abs_determinant()\n\n a_shape = a.scale.shape\n if len(b.scale.shape) == 3:\n _b_scale_inv = 
tf.tile(self.pz_scale_inv[tf.newaxis], [a_shape[0]] + [1] * (len(a_shape) - 1))\n else:\n _b_scale_inv = tf.tile(self.pz_scale_inv, [a_shape[0]] + [1] * (len(a_shape) - 1))\n\n b_inv_a = _b_scale_inv @ a.scale.to_dense()\n\n # approx. 10x times faster on CPU than on GPU according to paper\n with tf.device('/cpu:0'):\n kl_div = (self.pz_scale_log_abs_determinant - \n a.scale.log_abs_determinant() +\n 0.5 * (-tf.cast(a.scale.domain_dimension_tensor(), a.dtype) +\n squared_frobenius_norm(b_inv_a) + \n squared_frobenius_norm(b.scale.solve((b.mean() - a.mean())[..., tf.newaxis]))))\n return kl_div\n\n # Only needed when using importance sampling\n def reduce_logmeanexp(self, x, axis, epsilon=1e-5):\n \"\"\"Implementation of log-mean-exponent.\n Args:\n x: The tensor to reduce.\n axis: The dimensions to reduce.\n eps: Floating point scalar to avoid log-underflow -> found issue occassionally when using random (unseeded) init\n Returns:\n log_mean_exp: A tensor representing log(Avg{exp(x): x}).\n \"\"\"\n x_max = tf.reduce_max(x, axis=axis, keepdims=True)\n return tf.math.log(tf.reduce_mean(tf.exp(x - x_max), axis=axis, keepdims=True) + epsilon) + x_max\n\n \n # Train and test modes from my experience with PyTorch, needed to indicate whether to augment at train time\n def train(self, augment_at_train_time=False):\n \"\"\"\n Place model in training mode.\n Allow augment_at_train_time to be set to true if specified\n \"\"\"\n self.training = True\n self.augment_at_train_time = augment_at_train_time\n\n \n def eval(self):\n \"\"\"\n Place model in evaluation mode -> disables augmentation\n \"\"\"\n self.training = False\n"
] | [
[
"tensorflow.device",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"numpy.concatenate",
"tensorflow.random.set_seed",
"tensorflow.math.is_finite",
"numpy.ceil",
"tensorflow.square",
"tensorflow.tile",
"numpy.multiply",
"numpy.random.choice",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.zeros_like",
"tensorflow.round",
"numpy.array",
"tensorflow.reduce_max",
"tensorflow.transpose",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.math.squared_difference",
"tensorflow.reshape",
"tensorflow.expand_dims",
"numpy.random.uniform",
"sklearn.preprocessing.StandardScaler",
"tensorflow.linalg.cholesky",
"tensorflow.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fenglsuc/PatCit | [
"6f2585dac156a69ff94f002d387c75bd723529df"
] | [
"bin/prep-npl-class.py"
] | [
"import os\nfrom glob import glob\n\nimport pandas as pd\nimport spacy\nimport typer\nfrom tqdm import tqdm\nfrom wasabi import Printer\n\nVAR = [\"npl_publn_id\", \"npl_class\"]\nmsg = Printer()\n\n\n# Could certainly do better in terms of efficiency. Still OK -> takes around 4:30 to process\n# the full db\n\n\ndef get_pred_class(doc):\n cats_ = doc.cats\n pred_ = [k for k, v in cats_.items() if v > 0.5]\n out = pred_[0] if pred_ else \"Unknown\"\n return out\n\n\ndef main(\n path: str,\n spacy_model: str = typer.Option(\n default=\"models/npl_class/en_core_web_sm_npl-class-ensemble-1.0\",\n help=\"Path to the spaCy model with the 'textcat' pipe\",\n ),\n overwrite: bool = False,\n):\n \"\"\"\n Assumes that the input files have the following fields:\n - npl_biblio: raw citation\n - npl_ctype: citation type when already known, nan else\n \"\"\"\n nlp = spacy.load(spacy_model)\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in [\"textcat\"]]\n with nlp.disable_pipes(*other_pipes):\n\n files = glob(path)\n existing_files = glob(\n os.path.join(os.path.dirname(path), \"proc_\" + path.split(\"/\")[-1])\n )\n msg.info(f\"{','.join(existing_files)} already existing. Overwrite: {overwrite}\")\n\n for file in files:\n msg.info(f\"START: {file}\")\n fout = os.path.join(os.path.dirname(file), \"proc_\" + file.split(\"/\")[-1])\n if fout in existing_files and not overwrite:\n pass # we don't overwrite\n else:\n if os.path.isfile(fout):\n os.remove(fout) # we overwrite\n header = True\n for chunk in tqdm(pd.read_csv(file, chunksize=1e5)):\n todo = chunk.query(\"npl_ctype!=npl_ctype\").copy()\n done = chunk.query(\"npl_ctype==npl_ctype\").copy()\n msg.info(f\"{len(chunk)} rows ({len(done)} rows already assigned)\")\n\n done[\"npl_class\"] = done[\"npl_ctype\"]\n\n npls = todo[\"npl_biblio\"].astype(str).values\n todo[\"docs\"] = list(nlp.pipe(npls))\n todo[\"npl_class\"] = todo[\"docs\"].apply(\n lambda doc: get_pred_class(doc)\n )\n\n todo[VAR].append(done[VAR]).to_csv(\n fout, index=False, header=header, mode=\"a\"\n )\n header = False # we don't want the header in chunk_n with n>1\n msg.good(f\"DONE: {file}\")\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
TripleEss/TDALayer | [
"25a2da5eab50fad2d006167c2d1c97ec5efb53e0"
] | [
"nn/levelset_dionysus.py"
] | [
"from ..functional.levelset_dionysus import Diagramlayer as levelsetdgm\r\nfrom ..util.process import remove_filler\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom scipy.spatial import Delaunay\r\nimport dionysus as d\r\n\r\ndef init_freudenthal_2d(width, height):\r\n    \"\"\"\r\n    Freudenthal triangulation of 2d grid\r\n    \"\"\"\r\n    s = d.Filtration()\r\n    # row-major format\r\n    # 0-cells\r\n    for i in range(height):\r\n        for j in range(width):\r\n            ind = i*width + j\r\n            s.append(d.Simplex([ind]))\r\n    # 1-cells\r\n    for i in range(height):\r\n        for j in range(width-1):\r\n            ind = i*width + j\r\n            s.append(d.Simplex([ind, ind + 1]))\r\n    for i in range(height-1):\r\n        for j in range(width):\r\n            ind = i*width + j\r\n            s.append(d.Simplex([ind, ind + width]))\r\n    # 2-cells + diagonal 1-cells\r\n    for i in range(height-1):\r\n        for j in range(width-1):\r\n            ind = i*width + j\r\n            # diagonal\r\n            s.append(d.Simplex([ind, ind + width + 1]))\r\n            # 2-cells\r\n            s.append(d.Simplex([ind, ind + 1, ind + width + 1]))\r\n            s.append(d.Simplex([ind, ind + width, ind + width + 1]))\r\n    return s\r\n\r\n\r\nclass LevelSetLayer(nn.Module):\r\n    \"\"\"\r\n    Level set persistence layer\r\n    Parameters:\r\n        size : (width, height) - tuple for image input dimensions\r\n        maxdim : maximum homology dimension (default 1)\r\n        complex :\r\n            \"scipy\" - use scipy freudenthal triangulation (default)\r\n            \"freudenthal\" - use canonical freudenthal triangulation\r\n    \"\"\"\r\n    def __init__(self, size, maxdim=1, complex=\"scipy\"):\r\n        super(LevelSetLayer, self).__init__()\r\n        self.size = size\r\n        self.maxdim = maxdim\r\n        self.fnobj = levelsetdgm()\r\n\r\n        # extract width and height\r\n        width, height = size\r\n        if complex == \"scipy\":\r\n            # initialize complex to use for persistence calculations\r\n            axis_x = np.arange(0, width)\r\n            axis_y = np.arange(0, height)\r\n            grid_axes = np.array(np.meshgrid(axis_x, axis_y))\r\n            grid_axes = np.transpose(grid_axes, (1, 2, 0))\r\n\r\n            # creation of a complex for calculations\r\n            tri = Delaunay(grid_axes.reshape([-1, 2]))\r\n            faces = tri.simplices.copy()\r\n            self.complex = self.fnobj.init_filtration(faces)\r\n        elif complex == \"freudenthal\":\r\n            self.complex = init_freudenthal_2d(width, height)\r\n        else:\r\n            raise AssertionError(\"bad complex type\")\r\n\r\n    def forward(self, img):\r\n        dgm = self.fnobj.apply(img, self.complex)\r\n        #dgm = dgm[0:(self.maxdim+1),:,:]\r\n        dgms = tuple(remove_filler(dgm[i], -np.inf) for i in range(self.maxdim+1))\r\n        return dgms, False\r\n\r\n\r\ndef init_line_complex(p):\r\n    \"\"\"\r\n    initialize 1D complex on the line\r\n    Input:\r\n        p - number of 0-simplices\r\n    Will add (p-1) 1-simplices\r\n    \"\"\"\r\n    f = d.Filtration()\r\n    for i in range(p-1):\r\n        c = d.closure([d.Simplex([i, i+1])], 1)\r\n        for j in c:\r\n            f.append(j)\r\n    return f\r\n\r\n\r\nclass LevelSetLayer1D(nn.Module):\r\n    \"\"\"\r\n    Level set persistence layer\r\n    Parameters:\r\n        size : number of features\r\n        only returns H0\r\n    \"\"\"\r\n    def __init__(self, size):\r\n        super(LevelSetLayer1D, self).__init__()\r\n        self.size = size\r\n        self.fnobj = levelsetdgm()\r\n        self.complex = init_line_complex(size)\r\n\r\n    def forward(self, img):\r\n        dgm = self.fnobj.apply(img, self.complex)\r\n        dgm = dgm[0]\r\n        return (dgm,), False\r\n"
] | [
[
"numpy.arange",
"numpy.meshgrid",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gevaertlab/DAPL | [
"afa5f52a6e638df056747db089c8ff2b0b9783b9"
] | [
"predict_random_missing.py"
] | [
"import os\nimport tensorflow as tf\nimport pandas as pd\nimport time\nfrom autoencoder import *\nimport random\nimport numpy as np\n\n \ndef reconstruct_loss(dataset_test_uncorrutped,dataset_test,autoencoder_fun,checkpoint_file='default.ckpt',missing_ind=None):\n input_image, reconstructed_image = autoencoder_fun(batch_shape)\n\n init = tf.global_variables_initializer()\n saver=tf.train.Saver()\n with tf.Session() as session:\n\n print((\"Loading variables from '%s'.\" % checkpoint_file))\n saver.restore(session, checkpoint_file)\n print('restored') \n dataset_size = dataset_test.shape[0]\n print(\"Dataset size:\", dataset_size) \n \n dataset_test=np.asarray(dataset_test).astype(\"float32\") \n dataset_test_uncorrutped=np.asarray(dataset_test_uncorrutped).astype(\"float32\") \n \n reconstruct= session.run(reconstructed_image, feed_dict={input_image: dataset_test}) \n loss=rmse_loss(reconstruct,dataset_test_uncorrutped,missing_ind)\n \n\n return(loss)\n\ndef rmse_loss(reconstructed, original,missing_ind):\n rmse = np.sqrt(((reconstructed[0,missing_ind] - original[0,missing_ind]) ** 2).mean())\n return rmse\n\n\ndef mask_dfrow(row,perc):\n sample=np.random.binomial(1,perc,size=row.size)\n corrupted=row*sample\n return(corrupted)\n\n\nif __name__ == '__main__':\n \n input_name=sys.argv[1] # 'testdata_100sample.csv'\n output_path=sys.argv[2] # \"testloss_100sample.csv\"\n model_path=sys.argv[3] # 'imputationmodel.ckpt'\n feature_size=sys.argv[4] #Dimension of the feature, 17176\n nonmissing_perc=sys.argv[5] #Percent of non-missing elements in the data, 0.7\n\n\n \n holdout_cohort= pd.read_csv(input_name) \n\n np.random.seed(1)\n corrupted_holdout_cohort=holdout_cohort.apply(mask_dfrow,perc=nonmissing_perc,axis=1)\n \n loss_list=0\n for i in range(0,corrupted_holdout_cohort.shape[0]):\n cur_test=corrupted_holdout_cohort.iloc[i:i+1,:]\n true_cur_test=holdout_cohort.iloc[i:i+1,:]\n \n missing_index=np.where(cur_test.iloc[0,:]==0)[0]\n batch_shape = (1, feature_size)\n np.set_printoptions(threshold=np.inf)\n tf.reset_default_graph()\n loss_val=reconstruct_loss(true_cur_test,cur_test,autoencoder4_d, model_path ,missing_index)\n \n loss_list=np.append(loss_list,loss_val) \n print(loss_val)\n if i%5==0:\n np.savetxt(output_path, loss_list, delimiter=\"\\t\")\n \n \n \n \n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.asarray",
"numpy.set_printoptions",
"numpy.random.binomial",
"tensorflow.global_variables_initializer",
"numpy.append",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.where",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
anonymouscontributor/cnr | [
"f0c793baddf67b8540ea90617e82d27269d367b9"
] | [
"cnr/utils.py"
] | [
"'''\nCollection of utility functions\n\n@date May 25, 2015\n'''\n\nimport numpy as np\nimport pickle, os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\nimport mpl_toolkits.mplot3d.axes3d as p3\nfrom .Domains import nBox, DifferenceOfnBoxes\n\n\ndef plot_results(results, offset=500, directory=None, show=True):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n ylimits = [[np.Infinity, -np.Infinity] for i in range(3)]\n plt.figure(1)\n plt.title('cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n plt.figure(2)\n plt.title('time-avg. cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n plt.figure(3)\n plt.title(r'log time-avg. cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n # and now plot, depending on what data is there\n for result in results:\n if result.algo in ['DA', 'OGD']:\n try:\n plt.figure(1)\n lavg = plt.plot(result.regs_norate['savg'][0], linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_norate['perc_10'][0],\n result.regs_norate['perc_90'][0], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_norate['tsavg'][0][offset:],\n linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_norate['tavg_perc_10'][0][offset:],\n result.regs_norate['tavg_perc_90'][0][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'][0], linewidth=2.0,\n label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'][0],\n result.regs_norate['tavg_perc_90'][0], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavgbnd'][0], '--',\n color=lltsavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_norate['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_norate['tsavgbnd'][0]))\n except AttributeError: pass\n try:\n for i,(T,eta) in enumerate(result.etaopts.items()):\n plt.figure(1)\n lavg = plt.plot(result.regs_etaopts['savg'][i][0:T], linewidth=2.0,\n label=result.label+' '+r' $\\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), rasterized=True)\n plt.plot(np.arange(T,result.problem.T), result.regs_etaopts['savg'][i][T:], '--',\n color=lavg[0].get_color(), linewidth=2, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_etaopts['perc_10'][i],\n result.regs_etaopts['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(offset,T), result.regs_etaopts['tsavg'][i][offset:T], linewidth=2.0,\n label=result.label+' '+r' $\\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), rasterized=True)\n plt.plot(np.arange(T,result.problem.T), result.regs_etaopts['tsavg'][i][T:], '--',\n color=ltavg[0].get_color(), linewidth=2, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_etaopts['tavg_perc_10'][i][offset:],\n result.regs_etaopts['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, 
result.problem.T))\n plt.figure(3)\n llogtavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_etaopts['tsavg'][i],\n linewidth=2.0, label=result.label+' '+r' $\\eta_{{opt}}(T={0:.1e}) = {1:.3f}$'.format(T, eta), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_etaopts['tavg_perc_10'][i],\n result.regs_etaopts['tavg_perc_90'][i], color=llogtavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_etaopts['tsavgbnd'][i], '--',\n color=llogtavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_etaopts['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_etaopts['tsavgbnd'][0]))\n #\n except AttributeError: pass\n try:\n for i,eta in enumerate(result.etas):\n plt.figure(1)\n lavg = plt.plot(result.regs_etas['savg'][i], linewidth=2.0, label=result.label+' '+r' $\\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_etas['perc_10'][i],\n result.regs_etas['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(offset,result.problem.T), result.regs_etas['tsavg'][i][offset:],\n linewidth=2.0, label=result.label+' '+r'$\\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_etas['tavg_perc_10'][i][offset:],\n result.regs_etas['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n llogtavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_etas['tsavg'][i], linewidth=2.0,\n label=result.label+' '+r' $\\eta = {0:.3f}$'.format(eta), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_etas['tavg_perc_10'][i],\n result.regs_etas['tavg_perc_90'][i], color=llogtavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_etas['tsavgbnd'][i], '--',\n color=llogtavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_etaos['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_etas['tsavgbnd'][0]))\n except AttributeError: pass\n try:\n for i,alpha in enumerate(result.alphas):\n plt.figure(1)\n lavg = plt.plot(result.regs_alphas['savg'][i], linewidth=2.0,\n label=result.label+' '+r' $\\eta_t = {0} \\cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_alphas['perc_10'][i],\n result.regs_alphas['perc_90'][i], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_alphas['tsavg'][i][offset:], linewidth=2.0,\n label=result.label+' '+r' $\\eta_t = {0} \\cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_alphas['tavg_perc_10'][i][offset:],\n result.regs_alphas['tavg_perc_90'][i][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_alphas['tsavg'][i], linewidth=2.0,\n label=result.label+' '+r' $\\eta_t = {0} \\cdot t^{{{1}}}$'.format(result.thetas[i], -alpha), rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_alphas['tavg_perc_10'][i],\n 
result.regs_alphas['tavg_perc_90'][i], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_alphas['tsavgbnd'][i], '--', color=lltsavg[0].get_color(),\n linewidth=2.0, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_alphas['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_alphas['tsavgbnd'][0]))\n except AttributeError: pass\n else:\n plt.figure(1)\n lavg = plt.plot(result.regs_norate['savg'][0], linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(result.problem.T), result.regs_norate['perc_10'][0],\n result.regs_norate['perc_90'][0], color=lavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.figure(2)\n ltavg = plt.plot(np.arange(result.problem.T)[offset:], result.regs_norate['tsavg'][0][offset:],\n linewidth=2.0, label=result.label, rasterized=True)\n plt.fill_between(np.arange(offset,result.problem.T), result.regs_norate['tavg_perc_10'][0][offset:],\n result.regs_norate['tavg_perc_90'][0][offset:], color=ltavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.xlim((0, result.problem.T))\n plt.figure(3)\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'][0], linewidth=2.0,\n label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'][0],\n result.regs_norate['tavg_perc_90'][0], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavgbnd'][0], '--',\n color=lltsavg[0].get_color(), linewidth=2, rasterized=True)\n ylimits[2][0] = np.minimum(ylimits[2][0], np.min(result.regs_norate['tsavg'][0]))\n ylimits[2][1] = np.maximum(ylimits[2][1], 1.1*np.max(result.regs_norate['tsavgbnd'][0]))\n\n # make plots pretty and show legend\n plt.figure(1)\n plt.legend(loc='upper left', prop={'size':13}, frameon=False)\n plt.figure(2)\n plt.legend(loc='upper right', prop={'size':13}, frameon=False)\n plt.figure(3)\n plt.yscale('log'), plt.xscale('log')\n# plt.ylim(np.log(ylimits[2][0]), np.log(ylimits[2][1]))\n plt.legend(loc='upper right', prop={'size':13}, frameon=False)\n if directory:\n os.makedirs(directory+'figures/', exist_ok=True) # this could probably use a safer implementation\n filename = '{}{}_{}_'.format(directory+'figures/', results[0].problem.desc, results[0].problem.lossfuncs[0].desc)\n plt.figure(1)\n plt.savefig(filename + 'cumloss.pdf', bbox_inches='tight', dpi=300)\n plt.figure(2)\n plt.savefig(filename + 'tavgloss.pdf', bbox_inches='tight', dpi=300)\n plt.figure(3)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n\n\ndef plot_dims(results, directory=None, show=True):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n# ylimits = [np.Infinity, -np.Infinity]\n f = plt.figure()\n plt.title(r'log time-avg. 
cumulative regret, {} losses'.format(results[0].problem.lossfuncs[0].desc))\n plt.xlabel('t')\n dim_styles = {2:'--', 3:'-.', 4:':'}\n # and now plot, depending on what data is there\n for loss_results in results:\n for result in loss_results:\n lltsavg = plt.plot(np.arange(1,result.problem.T+1), result.regs_norate['tsavg'], linewidth=2.0,\n linestyle=dim_styles[result.dim], label=result.label, rasterized=True)\n plt.fill_between(np.arange(1,result.problem.T+1), result.regs_norate['tavg_perc_10'], result.regs_norate['tavg_perc_90'],\n linestyle=dim_styles[result.dim], color=lltsavg[0].get_color(), alpha=0.1, rasterized=True)\n # make plots pretty and show legend\n plt.yscale('log'), plt.xscale('log')\n plt.legend(loc='upper right', prop={'size':13}, frameon=False)\n if directory:\n os.makedirs(directory+'figures/', exist_ok=True) # this could probably use a safer implementation\n filename = '{}{}_{}_'.format(directory+'figures/', results[0].problem.desc, results[0].problem.lossfuncs[0].desc)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n\n\n\ndef plot_loglogs(results, directory=None, show=True, bounds=True, **kwargs):\n \"\"\" Plots and shows or saves (or both) the simulation results \"\"\"\n # set up figures\n f = plt.figure()\n loss_title = list(results[0].values())[0].problem.lossfuncs[0].desc\n plt.title(r'log time-avg. cumulative regret, {} losses'.format(loss_title))\n plt.xlabel('t')\n colors = ['k', 'r', 'g', 'b', 'c', 'm', 'y']*3\n loss_styles = ['-', '--', '-.', ':']*3\n labs = kwargs.get('labels')\n # and now plot, depending on what data is there\n for i,loss_results in enumerate(results):\n for j,key in enumerate(loss_results.keys()):\n r = loss_results[key]\n if labs is not None:\n lab = labs[i][j]\n print(lab)\n else:\n lab = r.label\n lltsavg = plt.plot(np.arange(1,r.problem.T+1), r.regs_norate['tsavg'][0], linewidth=2.0,\n linestyle=loss_styles[i], color=colors[j], label=lab, rasterized=True)\n plt.fill_between(np.arange(1,r.problem.T+1), r.regs_norate['tavg_perc_10'][0], r.regs_norate['tavg_perc_90'][0],\n linestyle=loss_styles[i], color=colors[j], alpha=0.1, rasterized=True)\n if bounds:\n try:\n plt.plot(np.arange(1,r.problem.T+1), r.regs_norate['tsavgbnd'][0],\n color=colors[j], linewidth=3, rasterized=True)\n except IndexError: pass\n # make plots pretty and show legend\n plt.yscale('log'), plt.xscale('log')\n plt.legend(prop={'size':12}, frameon=False, **kwargs) #loc='lower center',\n if directory:\n os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation\n filename = '{}{}_{}_'.format(directory, list(results[0].values())[0].problem.desc,\n list(results[0].values())[0].problem.lossfuncs[0].desc)\n plt.savefig(filename + 'loglogtavgloss.pdf', bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n\n\ndef plot_snapshots(results, times, filename=None, show=False, **kwargs):\n \"\"\" Creates a sequence of plots from the pltdata array in the results at the\n time steps specified in times (will be ordered increasing).\n Here results is an iterable of results. The resulting figure will have\n len(results) x len(times) plots. 
\"\"\"\n pltpoints = results[0].problem.pltpoints\n fig = plt.figure(figsize=kwargs.get('figsize'))\n # idk why the FUCK this does not work just using np arrays!?\n zmax = np.max([np.max([np.max([np.max(df) for df in dflat]) for dflat in result.pltdata]) for result in results])\n zmin = np.min([np.min([np.min([np.min(df) for df in dflat]) for dflat in result.pltdata]) for result in results])\n for i,result in enumerate(results):\n bbox = result.problem.domain.bbox()\n for j,time in enumerate(np.sort(times)):\n ax = fig.add_subplot(len(results), len(times), len(times)*i+j+1, projection='3d')\n for points,dat in zip(pltpoints, result.pltdata[time]):\n ax.plot_trisurf(points[:,0], points[:,1], dat, cmap=plt.get_cmap('jet'),\n linewidth=0, vmin=zmin, vmax=zmax)\n # Setting the axes properties\n ax.set_xlim3d(bbox.bounds[0])\n ax.set_xlabel('$s_1$')\n ax.set_ylim3d(bbox.bounds[1])\n ax.set_ylabel('$s_2$')\n ax.set_zlim3d([-0.1, zmax])\n ax.set_zlabel('$x$')\n ax.set_title('t={}'.format(time))\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.view_init(elev=kwargs.get('elev'), azim=kwargs.get('azim'))\n plt.tight_layout()\n# if directory:\n# os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation\n# filename = '{}{}_{}_'.format(directory, results[0].problem.desc,\n# results[0].problem.lossfuncs[0].desc)\n# plt.savefig(filename + 'snapshots.pdf', bbox_inches='tight', dpi=300)\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', dpi=300)\n if show:\n plt.show()\n plt.close()\n\n\ndef save_results(results, directory):\n \"\"\" Serializes a results object for persistent storage using the pickle module. \"\"\"\n os.makedirs(directory, exist_ok=True) # this could probably use a safer implementation\n slope_txt = []\n for result in results:\n try:\n [slope_txt.append('{}, Empirical: {}\\n'.format(result.label, val[0])) for val in result.slopes.values()]\n [slope_txt.append('{}, Bounds: {}\\n'.format(result.label, val[0])) for val in result.slopes_bnd.values()]\n del result.problem.pltpoints, result.problem.data\n except (AttributeError, IndexError):\n pass\n slopes_name = '{}{}_{}_slopes.txt'.format(directory, results[0].problem.desc,\n results[0].problem.lossfuncs[0].desc)\n with open(slopes_name, 'w') as f:\n f.writelines(slope_txt)\n pigglname = '{}{}_{}.piggl'.format(directory, results[0].problem.desc,\n results[0].problem.lossfuncs[0].desc)\n with open(pigglname, 'wb') as f:\n pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef visualize_potentials(potentials, xlim=(-1,5), **kwargs):\n u = np.linspace(xlim[0], xlim[1], 1000)\n plt.figure(figsize=kwargs.get('figsize'))\n labels = kwargs.get('labels')\n if labels is None:\n labels = [pot.desc for pot in potentials]\n if kwargs.get('semilogy') == True:\n for vals,label in zip([pot.phi(u) for pot in potentials], labels):\n plt.semilogy(u, 1+vals, label=label)\n else:\n for vals,label in zip([pot.phi(u) for pot in potentials], labels):\n plt.plot(u, vals, label=label, linewidth=2)\n plt.ylim(kwargs.get('ylim'))\n plt.xlabel('$u$', fontsize=15)\n plt.ylabel('$\\phi(u)$', fontsize=15)\n plt.legend(loc=kwargs.get('loc'), frameon=False)\n plt.title('Various $\\omega$-potentials')\n plt.tight_layout()\n if kwargs.get('filename') is not None:\n plt.savefig(kwargs.get('filename'), bbox_inches='tight', dpi=300)\n if kwargs.get('show') is not False:\n plt.show()\n plt.close()\n\n\ndef 
circular_tour(domain, N):\n \"\"\" Returns a sequence of N points that wander around in a circle\n in the domain. Used for understanding various learning rates. \"\"\"\n if domain.n != 2:\n raise Exception('For now circular_tour only works in dimension 2')\n if isinstance(domain, nBox):\n center = np.array([0.5*(bnd[0]+bnd[1]) for bnd in domain.bounds])\n halfaxes = np.array([0.75*0.5*(bnd[1]-bnd[0]) for bnd in domain.bounds])\n return np.array([center[0] + halfaxes[0]*np.cos(np.linspace(0,2*np.pi,N)),\n center[1] + halfaxes[1]*np.sin(np.linspace(0,2*np.pi,N))]).T\n if isinstance(domain, DifferenceOfnBoxes) and (len(domain.inner) == 1):\n lengths = [bound[1] - bound[0] for bound in domain.outer.bounds]\n weights = np.array(lengths*2)/2/np.sum(lengths)\n bnds_inner, bnds_outer = domain.inner[0].bounds, domain.outer.bounds\n xs = np.concatenate([np.linspace(0.5*(bnds_inner[0][0]+bnds_outer[0][0]), 0.5*(bnds_inner[0][1]+bnds_outer[0][1]), weights[0]*N),\n 0.5*(bnds_outer[0][1]+bnds_inner[0][1])*np.ones(weights[1]*N),\n np.linspace(0.5*(bnds_inner[0][1]+bnds_outer[0][1]), 0.5*(bnds_inner[0][0]+bnds_outer[0][0]), weights[2]*N),\n 0.5*(bnds_outer[0][0]+bnds_inner[0][0])*np.ones(weights[3]*N)])\n ys = np.concatenate([0.5*(bnds_outer[1][0]+bnds_inner[1][0])*np.ones(weights[0]*N),\n np.linspace(0.5*(bnds_outer[1][0]+bnds_inner[1][0]), 0.5*(bnds_inner[1][1]+bnds_outer[1][1]), weights[1]*N),\n 0.5*(bnds_outer[1][1]+bnds_inner[1][1])*np.ones(weights[2]*N),\n np.linspace(0.5*(bnds_inner[1][1]+bnds_outer[1][1]), 0.5*(bnds_inner[1][0]+bnds_outer[1][0]), weights[3]*N)])\n return np.array([xs, ys]).T\n else:\n raise Exception('For now circular_tour only works on nBoxes and the difference of 2 nBoxes')\n\ndef quicksample(bounds, A, eta):\n \"\"\" Function returning actions sampled from the solution of the Dual Averaging\n update on an Box with Affine losses, Exponential Potential. \"\"\"\n C1, C2 = np.exp(-eta*A*bounds[:,0]), np.exp(-eta*A*bounds[:,1])\n Finv = lambda U: -np.log(C1 - (C1-C2)*U)/A/eta\n np.random.seed()\n return Finv(np.random.rand(*A.shape))\n\ndef CNR_worker(prob, *args, **kwargs):\n \"\"\" Helper function for wrapping class methods to allow for easy\n use of the multiprocessing package for parallel computing \"\"\"\n return prob.run_simulation(*args, **kwargs)\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"numpy.random.seed",
"matplotlib.pyplot.yscale",
"numpy.sort",
"numpy.ones",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xscale"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wild-flame/rafiki | [
"08f44bc2f2f036183658366889b9513cc967c39d"
] | [
"examples/models/image_classification/TfFeedForward.py"
] | [
"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.client import device_lib\nimport json\nimport os\nimport tempfile\nimport numpy as np\nimport base64\n\nfrom rafiki.config import APP_MODE\nfrom rafiki.model import BaseModel, InvalidModelParamsException, test_model_class, \\\n IntegerKnob, CategoricalKnob, FloatKnob, FixedKnob, dataset_utils, logger\nfrom rafiki.constants import TaskType, ModelDependency\n\nclass TfFeedForward(BaseModel):\n '''\n Implements a fully-connected feed-forward neural network with variable hidden layers on Tensorflow \n for simple image classification\n '''\n @staticmethod\n def get_knob_config():\n return {\n 'epochs': IntegerKnob(3, 10 if APP_MODE != 'DEV' else 3),\n 'hidden_layer_count': IntegerKnob(1, 8 if APP_MODE != 'DEV' else 2),\n 'hidden_layer_units': IntegerKnob(2, 128),\n 'learning_rate': FloatKnob(1e-5, 1e-1, is_exp=True),\n 'batch_size': CategoricalKnob([16, 32, 64, 128]),\n 'image_size': FixedKnob(32)\n }\n\n def __init__(self, **knobs):\n super().__init__(**knobs)\n self._knobs = knobs\n self._graph = tf.Graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self._sess = tf.Session(graph=self._graph, config=config)\n \n def train(self, dataset_uri):\n im_sz = self._knobs.get('image_size')\n bs = self._knobs.get('batch_size')\n ep = self._knobs.get('epochs')\n\n logger.log('Available devices: {}'.format(str(device_lib.list_local_devices())))\n\n # Define 2 plots: Loss against time, loss against epochs\n logger.define_loss_plot()\n logger.define_plot('Loss Over Time', ['loss'])\n\n dataset = dataset_utils.load_dataset_of_image_files(dataset_uri, image_size=[im_sz, im_sz])\n num_classes = dataset.classes\n (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])\n images = np.asarray(images)\n classes = np.asarray(classes)\n\n with self._graph.as_default():\n self._model = self._build_model(num_classes)\n with self._sess.as_default():\n self._model.fit(\n images, \n classes, \n verbose=0,\n epochs=ep,\n batch_size=bs,\n callbacks=[\n tf.keras.callbacks.LambdaCallback(on_epoch_end=self._on_train_epoch_end)\n ]\n )\n\n # Compute train accuracy\n (loss, accuracy) = self._model.evaluate(images, classes)\n logger.log('Train loss: {}'.format(loss))\n logger.log('Train accuracy: {}'.format(accuracy))\n\n def evaluate(self, dataset_uri):\n im_sz = self._knobs.get('image_size')\n\n dataset = dataset_utils.load_dataset_of_image_files(dataset_uri, image_size=[im_sz, im_sz])\n (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])\n images = np.asarray(images)\n classes = np.asarray(classes)\n\n with self._graph.as_default():\n with self._sess.as_default():\n (loss, accuracy) = self._model.evaluate(images, classes)\n logger.log('Test loss: {}'.format(loss))\n\n return accuracy\n\n def predict(self, queries):\n im_sz = self._knobs.get('image_size')\n\n X = dataset_utils.resize_as_images(queries, image_size=[im_sz, im_sz])\n with self._graph.as_default():\n with self._sess.as_default():\n probs = self._model.predict(X)\n \n return probs.tolist()\n\n def destroy(self):\n self._sess.close()\n\n def dump_parameters(self):\n params = {}\n\n # Save model parameters\n with tempfile.NamedTemporaryFile() as tmp:\n # Save whole model to temp h5 file\n with self._graph.as_default():\n with self._sess.as_default():\n self._model.save(tmp.name)\n \n # Read from temp h5 file & encode it to base64 string\n with open(tmp.name, 'rb') as f:\n h5_model_bytes = f.read()\n\n 
params['h5_model_base64'] = base64.b64encode(h5_model_bytes).decode('utf-8')\n\n return params\n\n def load_parameters(self, params):\n # Load model parameters\n h5_model_base64 = params.get('h5_model_base64', None)\n if h5_model_base64 is None:\n raise InvalidModelParamsException()\n\n with tempfile.NamedTemporaryFile() as tmp:\n # Convert back to bytes & write to temp file\n h5_model_bytes = base64.b64decode(h5_model_base64.encode('utf-8'))\n with open(tmp.name, 'wb') as f:\n f.write(h5_model_bytes)\n\n # Load model from temp file\n with self._graph.as_default():\n with self._sess.as_default():\n self._model = keras.models.load_model(tmp.name)\n\n def _on_train_epoch_end(self, epoch, logs):\n loss = logs['loss']\n logger.log_loss(loss, epoch)\n\n def _build_model(self, num_classes):\n units = self._knobs.get('hidden_layer_units')\n layers = self._knobs.get('hidden_layer_count')\n lr = self._knobs.get('learning_rate')\n im_sz = self._knobs.get('image_size')\n\n model = keras.Sequential()\n model.add(keras.layers.Flatten(input_shape=(im_sz, im_sz,)))\n model.add(keras.layers.BatchNormalization())\n\n for _ in range(layers):\n model.add(keras.layers.Dense(units, activation=tf.nn.relu))\n\n model.add(keras.layers.Dense(\n num_classes, \n activation=tf.nn.softmax\n ))\n \n model.compile(\n optimizer=keras.optimizers.Adam(lr=lr),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n return model\n\n\nif __name__ == '__main__':\n test_model_class(\n model_file_path=__file__,\n model_class='TfFeedForward',\n task=TaskType.IMAGE_CLASSIFICATION,\n dependencies={\n ModelDependency.TENSORFLOW: '1.12.0'\n },\n train_dataset_uri='data/fashion_mnist_for_image_classification_train.zip',\n test_dataset_uri='data/fashion_mnist_for_image_classification_test.zip',\n queries=[\n [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 7, 0, 37, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 27, 84, 11, 0, 0, 0, 0, 0, 0, 119, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 88, 143, 110, 0, 0, 0, 0, 22, 93, 106, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 53, 129, 120, 147, 175, 157, 166, 135, 154, 168, 140, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 11, 137, 130, 128, 160, 176, 159, 167, 178, 149, 151, 144, 0, 0], \n [0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 0, 0, 115, 114, 106, 137, 168, 153, 156, 165, 167, 143, 157, 158, 11, 0], \n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 89, 139, 90, 94, 153, 149, 131, 151, 169, 172, 143, 159, 169, 48, 0], \n [0, 0, 0, 0, 0, 0, 2, 4, 1, 0, 0, 0, 98, 136, 110, 109, 110, 162, 135, 144, 149, 159, 167, 144, 158, 169, 119, 0], \n [0, 0, 2, 2, 1, 2, 0, 0, 0, 0, 26, 108, 117, 99, 111, 117, 136, 156, 134, 154, 154, 156, 160, 141, 147, 156, 178, 0], \n [3, 0, 0, 0, 0, 0, 0, 21, 53, 92, 117, 111, 103, 115, 129, 134, 143, 154, 165, 170, 154, 151, 154, 143, 138, 150, 165, 43], \n [0, 0, 23, 54, 65, 76, 
85, 118, 128, 123, 111, 113, 118, 127, 125, 139, 133, 136, 160, 140, 155, 161, 144, 155, 172, 161, 189, 62], \n [0, 68, 94, 90, 111, 114, 111, 114, 115, 127, 135, 136, 143, 126, 127, 151, 154, 143, 148, 125, 162, 162, 144, 138, 153, 162, 196, 58], \n [70, 169, 129, 104, 98, 100, 94, 97, 98, 102, 108, 106, 119, 120, 129, 149, 156, 167, 190, 190, 196, 198, 198, 187, 197, 189, 184, 36], \n [16, 126, 171, 188, 188, 184, 171, 153, 135, 120, 126, 127, 146, 185, 195, 209, 208, 255, 209, 177, 245, 252, 251, 251, 247, 220, 206, 49], \n [0, 0, 0, 12, 67, 106, 164, 185, 199, 210, 211, 210, 208, 190, 150, 82, 8, 0, 0, 0, 178, 208, 188, 175, 162, 158, 151, 11], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n ]\n )\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.Graph",
"tensorflow.keras.callbacks.LambdaCallback",
"tensorflow.python.client.device_lib.list_local_devices",
"numpy.asarray",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"tensorflow.ConfigProto",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.optimizers.Adam",
"tensorflow.Session",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
SmartChemDesign/gcnn_mol_transfer | [
"ba6908d79c3da442e73ba17cc5025eb7fb5fb570"
] | [
"run_train.py"
] | [
"import os\r\nimport tensorflow as tf\r\nfrom keras import backend\r\nimport sys\r\n\r\nsys.path.append(\"./Source\")\r\nfrom Keras_graphconv import GraphConvModelMod\r\nfrom TransferTrainer import TransferTrainer\r\n\r\nconfig = tf.ConfigProto(log_device_placement=True)\r\nconfig.gpu_options.allow_growth = True\r\nsess = tf.Session(config=config)\r\n\r\nbackend.clear_session()\r\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\nHYPERPARAMETERS = {\"classification\": {\"n_tasks\": 1, \"graph_conv_layers\": [128, 64], \"dense_layer_size\": 64,\r\n \"dropout\": 0, \"mode\": \"classification\", \"number_atom_features\": 75,\r\n \"n_classes\": 2, \"batch_size\": 64, \"num_dense\": 3, \"learning_rate\": 0.001,\r\n \"transfer\": False},\r\n \"regression\": {\"n_tasks\": 1, \"graph_conv_layers\": [128, 64], \"dense_layer_size\": 64,\r\n \"dropout\": 0.0, \"mode\": \"regression\", \"number_atom_features\": 75,\r\n \"uncertainty\": False, \"learning_rate\": 0.001,\r\n \"batch_size\": 10, \"num_dense\": 3, \"transfer\": False}}\r\n\r\npath_to_data = \"Datasets/logS_logP_dataset.sdf\"\r\nvaluename = \"logS\"\r\nsource_fold_folder = \"Donor_models/Chembl_models/regression_Chembl_data_bin_logP_2020_07_16_21_44_36\"\r\noutput_folder = \"Output\"\r\nmode = \"regression\"\r\n\r\ntrainer = TransferTrainer(GraphConvModelMod, mode=mode,\r\n output_folder=output_folder, n_epochs=1000, n_split=5,\r\n es_steps=20, parameters=HYPERPARAMETERS[mode],\r\n frac_train=0.8, batch_size=100,\r\n )\r\n\r\n# trainer.restore_model_params(source_fold_folder, layers_to_freeze=[\"graph_conv\"])\r\ntrainer.vectorize_training_data(path_to_data, valuename)\r\ntrainer.train_cv_models()\r\ntrainer.post_plots_metrics()\r\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
anguyen8/sam | [
"6f9525adacb65b4f5e00bbea23a1e37c9008db27"
] | [
"Sensitivity_Analysis_Basic_Occlusion_Comp_With_Default_Settings.py"
] | [
"import argparse, time, os, sys, glob, warnings, ipdb, math\nfrom RISE_evaluation import CausalMetric, auc, gkern\nfrom skimage.measure import compare_ssim as ssim\nfrom scipy.stats import spearmanr, pearsonr\nfrom skimage.transform import resize\nfrom itertools import combinations\nfrom skimage.feature import hog\nfrom srblib import abs_path\nfrom copy import deepcopy\nfrom RISE_utils import *\nimport utils as eutils\nimport torch.nn as nn\nimport numpy as np\nimport torch\nimport settings\n\ndef get_arguments():\n # Initialize the parser\n parser = argparse.ArgumentParser(description='Paramters for sensitivity analysis of heatmaps')\n\n parser.add_argument('-idp', '--input_dir_path', help='Path of the input directory', metavar='DIR')\n\n parser.add_argument('-op', '--out_path',\n help='Path of the output directory where you want to save the text files (Default is ./)')\n\n parser.add_argument('-mn', '--method_name', choices=['occlusion'],\n #['occlusion', 'ig', 'sg', 'grad', 'lime', 'mp', 'inpgrad'],\n help='Method you are analysing')\n\n parser.add_argument('--exp_num', choices=['a01', 'a02', 'a03'],\n help=f'Which experiment of Occlusion'\n f'a01 - Patch 5, 6, 7'\n f'a02 - Patch - 5, 17, 29, 41, 53'\n f'a03 - Patch - 52, 53, 54')\n\n parser.add_argument('--metric_name', choices=['ssim', 'hog', 'spearman'],\n help='Metric to be computed')\n\n # parser.add_argument('--num_variations', type=int,\n # help='Number of variations for a particular method.')\n\n # parser.add_argument('--no_img_name_dir_flag', action='store_false', default=True,\n # help=f'Flag to say that image name is stored as seperate directory in the input path.'\n # f'Default=True')\n\n # parser.add_argument('--no_model_name_dir_flag', action='store_false', default=True,\n # help=f'Flag to say that model name is stored as seperate directory in the input path. '\n # f'Default=True')\n\n parser.add_argument('--idx_flag', type=int,\n help=f'Flag whether to use some images in the folder (1) or all (0). '\n f'This is just for testing purposes. '\n f'Default=0', default=0,\n )\n\n parser.add_argument('-s_idx', '--start_idx', type=int,\n help='Start index for selecting images. Default: 0', default=0,\n )\n\n parser.add_argument('-e_idx', '--end_idx', type=int,\n help='End index for selecting images. Default: 2K', default=1735,\n )\n\n # parser.add_argument('--if_random', action='store_true', default=False,\n # help=f'Flag to say you want to compute results for baseline'\n # f'Default=False')\n\n # Parse the arguments\n args = parser.parse_args()\n args.no_model_name_dir_flag = False\n args.if_random = False\n args.no_img_name_dir_flag = True\n\n # if args.if_random:\n # np.random.seed(0)\n\n # if args.num_variations is None:\n # print('Please provide this number.\\nExiting')\n # sys.exit(0)\n # elif args.num_variations < 2:\n # print('This number cant be less than 2.\\nExiting')\n # sys.exit(0)\n\n if args.method_name is None:\n print('Please provide the name of the method.\\nExiting')\n sys.exit(0)\n\n if args.exp_num is None:\n print('Please provide the experiment number.\\nExiting')\n sys.exit(0)\n\n if args.metric_name is None:\n print('Please provide the name of the metric.\\nExiting')\n sys.exit(0)\n\n if args.input_dir_path is None:\n print('Please provide image dir path. 
Exiting')\n sys.exit(1)\n args.input_dir_path = abs_path(args.input_dir_path)\n\n if args.out_path is None:\n args.out_path = './'\n args.out_path = os.path.abspath(args.out_path)\n\n return args\n\n\n########################################################################################################################\ndef compute_score(h1, h2, method, metric_name):\n\n if metric_name.lower() == 'ssim':\n if method.lower() == 'mp':\n data_range = 1\n else:\n data_range = 2\n out = ssim(h1, h2, data_range=data_range, win_size=5) #to match the implementation of Been KIM\n\n elif metric_name.lower() == 'hog':\n hog1 = hog(h1, pixels_per_cell=(16, 16))\n hog2 = hog(h2, pixels_per_cell=(16, 16))\n out, _ = pearsonr(hog1, hog2)\n\n\n elif metric_name.lower() == 'spearman':\n out, _ = spearmanr(h1, h2, axis=None)\n\n else:\n print(f'Still not implemented.\\nExiting')\n sys.exit(1)\n\n return out\n\n\n########################################################################################################################\nif __name__ == '__main__':\n base_img_dir = abs_path(settings.imagenet_val_path)\n # base_img_dir = '/home/naman/CS231n/heatmap_tests/images/ILSVRC2012_img_val'\n # text_file = f'/home/naman/CS231n/heatmap_tests/' \\\n # f'Madri/Madri_New/robustness_applications/img_name_files/' \\\n # f'time_15669152608009198_seed_0_' \\\n # f'common_correct_imgs_model_names_madry_ressnet50_googlenet.txt'\n s_time = time.time()\n f_time = ''.join(str(s_time).split('.'))\n args = get_arguments()\n im_label_map = eutils.imagenet_label_mappings()\n eutils.mkdir_p(args.out_path)\n\n img_filenames = os.listdir(args.input_dir_path)\n img_filenames = [i for i in img_filenames if 'ILSVRC2012_val_000' in i and int(i.split('_')[-1]) in range(1, 50001)]\n if args.idx_flag == 1:\n img_filenames = img_filenames[0]\n\n # ## TODO: Chnages here\n # incorrect_img_list = np.load('/home/naman/CS231n/heatmap_tests/Madri/Madri_New/'\n # 'robustness_applications/img_name_files/incorrect_img_names.npy').tolist()\n\n ##############################################################\n model_names = []\n model_names.append('googlenet')\n model_names.append('pytorch')\n model_names.append('madry')\n model_names.append('madry_googlenet')\n print(model_names)\n\n\n mean_dict = {'pytorch': [],\n 'googlenet': [],\n 'madry': [],\n 'madry_googlenet': []} ## TODO: Chnages here\n\n var_dict = deepcopy(mean_dict)\n output = deepcopy(mean_dict)\n\n # load_model_fns = {'pytorch': eval('eutils.load_orig_imagenet_model'),\n # 'madry': eval('eutils.load_madry_model'),\n # 'googlenet': eval('eutils.load_orig_imagenet_model')}\n # load_model_args = {'pytorch': 'resnet50', 'madry': 'madry', 'googlenet': 'googlenet'}\n\n method_dict = {'grad': 'Grad',\n 'inpgrad': 'InpGrad',\n 'ig': 'IG',\n 'lime': 'Lime',\n 'mp': 'MP',\n 'occlusion': 'Occlusion',\n 'sg': 'SmoothGrad',\n }\n\n method_name = method_dict[args.method_name]\n metric_name = args.metric_name\n\n if method_name.lower() in ['occlusion', 'mp']:\n rescale_flag = False\n elif method_name.lower() in ['grad', 'inpgrad', 'ig', 'lime', 'smoothgrad']:\n rescale_flag = True\n\n # if method_name.lower() in ['occlusion', 'mp', 'lime']:\n # rescale_flag = False\n # elif method_name.lower() in ['grad', 'inpgrad', 'ig', 'smoothgrad']:\n # rescale_flag = True\n\n print(f'Rescale flag is {rescale_flag}')\n\n # img_filenames = []\n # with open(text_file, 'r') as f:\n # img_filenames = f.read().splitlines()\n # img_filenames = img_filenames[args.start_idx:args.end_idx]\n # if args.idx_flag == 1:\n # 
img_filenames = img_filenames[0]\n\n\n ## TODO: Chnages here\n img_file_numbers = np.array([int(imName.split('_')[-1]) for imName in img_filenames],\n dtype=int)\n\n print(f'Metric: {metric_name}')\n print(f'Method: {method_name}')\n\n # ## TODO: Chnages here\n # num_inc_imgs = len([i for i in img_filenames if i in incorrect_img_list])\n\n for modelIdx, model_name in enumerate(model_names):\n\n ## TODO: Chnages here\n temp_mean = np.zeros(len(img_filenames), dtype=float)\n temp_std = np.zeros(len(img_filenames), dtype=float) ## TODO: Chnages here (Name chnages)\n modelTime = time.time()\n print(f'Calculation variation for model: {model_name}')\n\n ## TODO: Chnages here\n idx = -1\n for _, img_name in enumerate(img_filenames):\n idx += 1\n\n if len(img_filenames) < 2000:\n print(f'Calculation variation across img: {img_name}, img_number: {idx:04d}')\n\n if args.no_model_name_dir_flag:\n dir_name = os.path.join(args.input_dir_path,\n f\"{method_name}_{model_name}\")\n else:\n dir_name = args.input_dir_path\n\n\n if method_name.lower() == 'lime':\n npy_str_lists = glob.glob(os.path.join(dir_name,\n f'{img_name}/time_*var_0.1_{model_name}.npy'))\n\n elif method_name.lower() == 'mp':\n im_num = int(img_name.split('_')[-1])\n npy_str_lists = glob.glob(os.path.join(dir_name,\n f'{img_name}/*{im_num:05d}_*{model_name}.npy'))\n\n elif method_name.lower() == 'occlusion':\n npy_str_lists = glob.glob(os.path.join(dir_name,\n f'{img_name}/*model_name_{model_name}*.npy'))\n\n if model_name.lower() == 'googlenet':\n npy_str_lists = [i for i in npy_str_lists if 'madry_googlenet' not in i]\n\n if model_name.lower() == 'madry':\n npy_str_lists = [i for i in npy_str_lists if 'madry_googlenet' not in i]\n\n npy_str_lists.sort()\n num_var = len(npy_str_lists)\n\n if args.exp_num.lower() == 'a01': # Patch 5, 6, 7\n baseline = [i for i in npy_str_lists if '_patch_size_006_' in i]\n npy_str_lists = [i for i in npy_str_lists if '_patch_size_006_' not in i]\n assert num_var - 1 == 2, 'Incorrect input path. Check your code'\n elif args.exp_num.lower() == 'a02': # Patch 5, 17, 29, 41, 53\n baseline = [i for i in npy_str_lists if '_patch_size_029_' in i]\n npy_str_lists = [i for i in npy_str_lists if '_patch_size_029_' not in i]\n assert num_var - 1 == 4, 'Incorrect input path. Check your code'\n elif args.exp_num.lower() == 'a03': # Patch 52, 53, 54\n baseline = [i for i in npy_str_lists if '_patch_size_053_' in i]\n npy_str_lists = [i for i in npy_str_lists if '_patch_size_053_' not in i]\n assert num_var - 1 == 2, 'Incorrect input path. 
Check your code'\n\n npy_str_lists.sort()\n\n\n else:\n npy_str_lists = glob.glob(os.path.join(dir_name,\n f'{img_name}/*_{img_name}_*model_name_'\n f'{model_name}_batch_idx*.npy'))\n\n assert len(baseline) == 1, 'Somthing is wrong with the baseline file'\n\n\n # print(f'Loading the results')\n heatmap_list = [np.load(i) for i in npy_str_lists]\n baseline_heatmap = [np.load(i) for i in baseline]\n\n if rescale_flag:\n ## Rescale the heatmaps to the original shape\n mVals = [max(np.abs(hMap.max()), np.abs(hMap.min())) for hMap in heatmap_list]\n heatmap_list = [hMap/mVals[hI] for hI, hMap in enumerate(heatmap_list)]\n\n mVals = [np.max(np.abs(hMap)) for hMap in baseline_heatmap]\n baseline_heatmap = [hMap / mVals[hI] for hI, hMap in enumerate(baseline_heatmap)]\n\n\n if method_name.lower() == 'occlusion':\n ## I can resize the input to the image size regardless of the input shape\n ## If input size is 224, there would not be any chnage in the output\n heatmap_list = [resize(i, (224, 224), order=5) for i in heatmap_list]\n baseline_heatmap = [resize(i, (224, 224), order=5) for i in baseline_heatmap]\n\n # if args.if_random:\n # if method_name.lower() == 'mp':\n # heatmap_list = [np.random.uniform(low=0, high=1, size=(i.shape)).astype(i.dtype) for i in heatmap_list]\n # else:\n # heatmap_list = [np.random.uniform(low=-1, high=1, size=(i.shape)).astype(i.dtype) for i in heatmap_list]\n\n # req_comb = list(combinations(range(num_var),2))\n scores = []\n for _, hMap in enumerate(heatmap_list):\n # for i1, i2 in req_comb:\n try:\n scores.append(compute_score(baseline_heatmap[0], hMap, method_name, metric_name))\n except:\n scores = [math.nan]\n\n if np.isnan(scores).any():\n temp_mean[idx] = math.nan\n temp_std[idx] = math.nan\n continue\n\n # ipdb.set_trace()\n mean_dict[model_name].append(np.mean(scores))\n var_dict[model_name].append(np.var(scores))\n\n temp_mean[idx] = np.mean(scores)\n temp_std[idx] = np.std(scores) ## TODO: Chnages here\n\n print(f'Len of samples considered is {len(mean_dict[model_name])}')\n output[model_name].append(np.mean(mean_dict[model_name]))\n output[model_name].append(np.sqrt(np.mean(var_dict[model_name]))) ## TODO: Chnages here\n\n print(f'Mean is {output[model_name][0]}, std is {output[model_name][1]}') ## TODO: Chnages here\n print(f'Time taken to evaluate {metric_name} metric for '\n f'model {model_name} on method {method_name} is {time.time() - modelTime}')\n\n\n if len(img_filenames) >= 1:\n ##Save the results to the text file\n path = os.path.join(args.out_path,\n f'Method_{method_name}_Metric_{metric_name}')\n eutils.mkdir_p(path)\n\n if args.if_random:\n fName = os.path.join(path, f'time_{f_time}_'\n f'Random_Baseline_{method_name}_{metric_name}.txt')\n else:\n fName = os.path.join(path, f'time_{f_time}_'\n f'Model_{model_name}_{method_name}_{metric_name}.txt')\n\n file_handle = open(fName, 'ab')\n temp_arr = np.concatenate((np.expand_dims(img_file_numbers, axis=-1),\n np.expand_dims(temp_mean, axis=-1),\n np.expand_dims(temp_std, axis=-1),\n ), axis=-1)\n\n ## TODO: Chnages here\n np.savetxt(file_handle, temp_arr, fmt='%05d, %.16f, %.16f',\n header='ImNum, Mean , Std', footer='\\nCumulative Results', comments='', ## TODO: Chnages here\n )\n temp_arr = np.concatenate((np.array([[len(mean_dict[model_name])]]),\n np.array([[output[model_name][0]]]),\n np.array([[output[model_name][1]]]),\n ), axis=-1)\n np.savetxt(file_handle, temp_arr,\n fmt='%05d, %.16f, %.16f',\n header='ImCou, Mean , Std', comments='',) ## TODO: Chnages here\n file_handle.close()\n\n ## 
TODO: Chnages here\n ## Saving the cumulative results\n if len(model_names) == 3:\n if len(img_filenames) >= 10:\n path = os.path.join(args.out_path,\n f'Method_{method_name}_Metric_{metric_name}')\n eutils.mkdir_p(path)\n fName = os.path.join(path, f'time_{f_time}_'\n f'cumulative_results.txt')\n file_handle = open(fName, 'ab')\n\n ## TODO: Chnages here\n gNet_row = np.asarray((['GoogleNet'] + [len(mean_dict['googlenet'])] + output['googlenet']), dtype=object).reshape(1, -1)\n rNet_row = np.asarray((['ResNet50'] + [len(mean_dict['pytorch'])] + output['pytorch']), dtype=object).reshape(1, -1)\n mNet_row = np.asarray((['Madry (Res)'] + [len(mean_dict['madry'])] + output['madry']), dtype=object).reshape(1, -1)\n temp_arr = np.concatenate((gNet_row,\n rNet_row,\n mNet_row,\n ),\n axis=0)\n np.savetxt(file_handle, temp_arr,\n fmt='%-11s, %05d, %.16f, %.16f',\n header='Network , ImCou, Mean , Std', comments='', ) ## TODO: Chnages here\n file_handle.close()\n ## TODO: Till here\n\n print(f'Time taken is {time.time() - s_time}')\n print(f'Time stamp is {f_time}')\n\n\n\n"
] | [
[
"numpy.expand_dims",
"numpy.abs",
"numpy.isnan",
"scipy.stats.pearsonr",
"numpy.load",
"numpy.var",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"numpy.savetxt",
"scipy.stats.spearmanr",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
lxjlu/highway-env | [
"b360ed3f4f7fca950294c82b04c55464624d5626"
] | [
"Pre-skills/data_collect.py"
] | [
"import gym\nimport highway_env\nimport time\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nenvs = {\n 0: \"myenv-c1-v0\", # 直线\n 1: \"myenv-c2-v0\", # 中度弯曲\n 2: \"myenv-c3-v0\", # 大量弯曲\n}\nN = 50\n\n# 1保持 0减速 2加速 lon\n# -1左 0保持 1右 latral\n\"\"\"\n | 0 1 2\n-1 |\n 0 |\n 1 |\n\"\"\"\nlabels_index = np.arange(9).reshape(3, 3)\n\n\ndef anchor_selector():\n \"\"\"\n 选不同的卯\n :return:\n \"\"\"\n # 选择不同的道路\n env_lucky = envs[np.random.choice(np.arange(3))]\n # print(\"env is {}\".format(env_lucky))\n\n env = gym.make(env_lucky)\n\n # 选择不同的初始状态\n lanes_count = env.config[\"lanes_count\"]\n lane_id = np.random.choice(np.arange(lanes_count))\n print(\"v lane id is {}\".format(lane_id))\n\n if lane_id == 0:\n target_lane_id = np.random.choice([0, 1])\n elif lane_id == lanes_count - 1:\n target_lane_id = np.random.choice([lanes_count - 1, lanes_count - 2])\n else:\n target_lane_id = np.random.choice([lane_id - 1, lane_id, lane_id + 1])\n\n print(\"target lane id is {}\".format(target_lane_id))\n\n lon_operation = np.random.choice([0, 1, 2]) # 1保持 0减速 2加速\n print(\"1保持 0减速 2加速 - is {}\".format(lon_operation))\n\n v_lane_id = (\"a\", \"b\", lane_id)\n target_lane_id2 = (\"a\", \"b\", target_lane_id)\n v_target_s = (lon_operation - 1) * 5 + env.vehicle.speed\n v_target_s = np.clip(0, 30, v_target_s)\n\n positon_x = np.random.choice(np.arange(0, env.road.network.get_lane(v_lane_id).length, 5))\n positon_y = np.random.choice(np.arange(-2, 2, 0.5))\n heading = np.random.choice(\n env.road.network.get_lane(v_lane_id).heading_at(positon_x) + np.arange(-np.pi / 12, np.pi / 12, 10))\n speed = np.random.choice(np.arange(0, 26, 5))\n\n position = env.road.network.get_lane(v_lane_id).position(positon_x, positon_y)\n inital_state = [position, heading, speed]\n\n env.config[\"v_lane_id\"] = v_lane_id\n env.config[\"v_target_id\"] = target_lane_id2\n env.config[\"v_x\"] = positon_x\n env.config[\"v_y\"] = positon_y\n env.config[\"v_h\"] = heading\n env.config[\"v_s\"] = speed\n env.config[\"v_target_s\"] = v_target_s\n\n env.reset()\n p = env.vehicle.position\n i_h = env.vehicle.heading\n i_s = env.vehicle.speed\n temp = [p[0], p[1], i_h, i_s]\n x_road, y_road = env.vehicle.target_lane_position(p)\n # temp = temp.extend(x_road)\n # temp = temp.extend(y_road)\n action_his_omega = []\n action_his_accel = []\n action = 1\n x_his, y_his, h_his, s_his = [], [], [], []\n for _ in range(N):\n if env.vehicle.on_road is False:\n print(\"出去了\")\n break\n\n env.step(action)\n action_his_omega.append(env.vehicle.action[\"steering\"])\n action_his_accel.append(env.vehicle.action[\"acceleration\"])\n x_his.append(env.vehicle.position[0])\n y_his.append(env.vehicle.position[1])\n h_his.append(env.vehicle.heading)\n s_his.append(env.vehicle.speed)\n # env.render()\n # time.sleep(0.5)\n env.close()\n # temp = temp.extend(action_his_omega)\n # temp = temp.extend(action_his_accel)\n tt = temp + x_road + y_road + action_his_omega + action_his_accel\n pp = x_his + y_his + h_his + s_his\n lane_change = target_lane_id - lane_id\n\n label = labels_index[lane_change + 1, lon_operation]\n\n return lane_change, lon_operation, tt, pp, label\n\n\ndef positive_selector(lateral_operation, lon_operation):\n \"\"\"\n 选正的样本\n :return:\n \"\"\"\n \"\"\" 正样本 \"\"\"\n # 选择不同的初始状态\n positive_env = envs[np.random.choice(np.arange(3))]\n # print(\"env is {}\".format(positive_env))\n env = gym.make(positive_env)\n lanes_count = env.config[\"lanes_count\"]\n\n if lateral_operation == 1:\n lane_id = np.random.choice([0, 1])\n 
positive_lane_id = lane_id + 1\n elif lateral_operation == -1:\n lane_id = np.random.choice([2, 1])\n positive_lane_id = lane_id - 1\n else:\n lane_id = np.random.choice([2, 1, 0])\n positive_lane_id = lane_id\n # print(\"v lane id is {}\".format(lane_id))\n v_lane_id = (\"a\", \"b\", lane_id)\n # print(\"target lane id is {}\".format(positive_lane_id))\n target_lane_id = (\"a\", \"b\", positive_lane_id)\n v_target_s = (lon_operation - 1) * 5 + env.vehicle.speed\n # print(\"inital speed is {}, target speed is {}\".format(env.vehicle.speed, v_target_s))\n\n positive_positon_x = np.random.choice(np.arange(0, env.road.network.get_lane(v_lane_id).length, 10))\n positive_positon_y = np.random.choice(np.arange(-2, 2, 3))\n positive_heading = np.random.choice(\n env.road.network.get_lane(v_lane_id).heading_at(positive_positon_x) + np.arange(-np.pi / 6, np.pi / 6, 10))\n positive_speed = np.random.choice(np.arange(0, 25, 5))\n\n env.config[\"v_lane_id\"] = v_lane_id\n env.config[\"v_target_id\"] = target_lane_id\n env.config[\"v_x\"] = positive_positon_x\n env.config[\"v_y\"] = positive_positon_y\n env.config[\"v_h\"] = positive_heading\n env.config[\"v_s\"] = positive_speed\n env.config[\"v_target_s\"] = v_target_s\n\n env.reset()\n p = env.vehicle.position\n i_h = env.vehicle.heading\n i_s = env.vehicle.speed\n temp = [p[0], p[1], i_h, i_s]\n x_road, y_road = env.vehicle.target_lane_position(p)\n action = 1\n action_his_omega = []\n action_his_accel = []\n x_his, y_his, h_his, s_his = [], [], [], []\n for _ in range(N):\n\n if env.vehicle.on_road is False:\n print(\"出去了\")\n break\n env.step(action)\n action_his_omega.append(env.vehicle.action[\"steering\"])\n action_his_accel.append(env.vehicle.action[\"acceleration\"])\n x_his.append(env.vehicle.position[0])\n y_his.append(env.vehicle.position[1])\n h_his.append(env.vehicle.heading)\n s_his.append(env.vehicle.speed)\n # env.render()\n # time.sleep(0.5)\n tt = temp + x_road + y_road + action_his_omega + action_his_accel\n pp = x_his + y_his + h_his + s_his\n env.close()\n return tt, pp\n\n\ndef negative_selector(lateral_operation, lon_operation):\n \"\"\"\n 选8个负的样本\n :return:\n \"\"\"\n\n \"\"\" 负样本 \"\"\"\n i = 1\n tt_8 = []\n pp_8 = []\n for lateral_negative in [-1, 0, 1]:\n for lon_neagetive in [0, 1, 2]:\n # print(\"第 {} 次开始: \".format(i))\n # print((\"later is {}, lon is {}\".format(lateral_negative, lon_neagetive)))\n i += 1\n if lateral_negative == lateral_operation and lon_operation == lon_neagetive:\n print(\"冲突样本\")\n continue\n\n negative_env = envs[np.random.choice(np.arange(3))]\n # print(\"env is {}\".format(negative_env))\n env = gym.make(negative_env)\n lanes_count = env.config[\"lanes_count\"]\n # lane_id = np.random.choice(np.arange(lanes_count))\n\n if lateral_negative == 1:\n lane_id = np.random.choice([0, 1])\n negative_lane_id = lane_id + 1\n elif lateral_negative == -1:\n lane_id = np.random.choice([2, 1])\n negative_lane_id = lane_id - 1\n else:\n negative_lane_id = np.random.choice([2, 1, 0])\n lane_id = negative_lane_id\n\n # print(\"v lane id is {}\".format(lane_id))\n v_lane_id = (\"a\", \"b\", lane_id)\n # print(\"target lane id is {}\".format(negative_lane_id))\n target_lane_id = (\"a\", \"b\", negative_lane_id)\n v_target_s = (lon_neagetive - 1) * 5 + env.vehicle.speed\n # print(\"inital speed is {}, target speed is {}\".format(env.vehicle.speed, v_target_s))\n\n negative_positon_x = np.random.choice(np.arange(0, env.road.network.get_lane(v_lane_id).length, 10))\n negative_positon_y = 
np.random.choice(np.arange(-2, 1.9, 3))\n negative_heading = np.random.choice(\n env.road.network.get_lane(v_lane_id).heading_at(negative_positon_x) + np.arange(-np.pi / 6, np.pi / 6,\n 10))\n negative_speed = np.random.choice(np.arange(0, 25, 5))\n\n env.config[\"v_lane_id\"] = v_lane_id\n env.config[\"v_target_id\"] = target_lane_id\n env.config[\"v_x\"] = negative_positon_x\n env.config[\"v_y\"] = negative_positon_y\n env.config[\"v_h\"] = negative_heading\n env.config[\"v_s\"] = negative_speed\n env.config[\"v_target_s\"] = v_target_s\n\n env.reset()\n p = env.vehicle.position\n i_h = env.vehicle.heading\n i_s = env.vehicle.speed\n temp = [p[0], p[1], i_h, i_s]\n x_road, y_road = env.vehicle.target_lane_position(p)\n action = 1\n action_his_omega = []\n action_his_accel = []\n x_his, y_his, h_his, s_his = [], [], [], []\n for _ in range(N):\n\n if env.vehicle.on_road is False:\n print(\"出去了\")\n break\n env.step(action)\n action_his_omega.append(env.vehicle.action[\"steering\"])\n action_his_accel.append(env.vehicle.action[\"acceleration\"])\n x_his.append(env.vehicle.position[0])\n y_his.append(env.vehicle.position[1])\n h_his.append(env.vehicle.heading)\n s_his.append(env.vehicle.speed)\n # env.render()\n # time.sleep(0.5)\n tt = temp + x_road + y_road + action_his_omega + action_his_accel\n pp = x_his + y_his + h_his + s_his\n tt_8.append(tt)\n pp_8.append(pp)\n env.close()\n return tt_8, pp_8\n\n\n# print(\"------锚--------\")\n# inital_state, initial_road, lane_change, lon_operation = anchor_selector()\n# print(\"------正--------\")\n# positive_selector(lane_change, lon_operation)\n# print(\"------负--------\")\n# negative_selector(lane_change, lon_operation)\n\n\nlane_change, lon_operation, tt, pp, label = anchor_selector()\n# tt_8, pp_8 = negative_selector(lane_change, lon_operation)\n"
] | [
[
"numpy.arange",
"numpy.clip",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Bartolo1024/RL-Local-Path-Planning | [
"c00824f54d7a2210cf26be5635e3c78233bf4a24"
] | [
"utils/__init__.py"
] | [
"import re\nimport numpy as np\nfrom .session import get_new_session_id\nfrom .loger import Logger\nfrom .saver import NetSaver\n\n\ndef _generate_action_space(speed_range=(.05, 0.3), speed_steps=2, rot_range=(-.3, .3), rot_steps=3):\n    # Build the cartesian product of linearly spaced speeds and rotations as (speed, rotation) action pairs.\n    _speeds = np.linspace(*(speed_range + (speed_steps,)))\n    _rots = np.linspace(*(rot_range + (rot_steps,)))\n    speeds = np.tile(_speeds, len(_rots))\n    rots = np.repeat(_rots, len(_speeds))\n    space = tuple((s, r) for s, r in zip(speeds, rots))\n    return space\n\n\ndef generate_action_space(gen_str):\n    ret = ((.3, .0), (.05, .3), (.05, -.3))\n    if not gen_str:\n        return ret\n    match = re.match(r'GEN\\((\\d+),(\\d+)\\)', gen_str)\n    if match:\n        speed_steps = int(match.group(1))\n        rot_steps = int(match.group(2))\n        ret = _generate_action_space(speed_steps=speed_steps, rot_steps=rot_steps)\n    return ret\n
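\n# Usage sketch (the 'GEN(3,5)' argument is a hypothetical example):\n# generate_action_space('GEN(3,5)')  # -> 3 speeds x 5 rotations = 15 (speed, rotation) pairs\n# generate_action_space(None)        # -> the default 3-action space\n"
] | [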
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
norgon1123/AugSenseTF | [
"4f8b22dfc8c9f7a4e07ec4f1d41a05b6967364be"
] | [
"scripts/label_image.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\nif __name__ == \"__main__\":\n file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n model_file = \"tf_files/retrained_graph.pb\"\n label_file = \"tf_files/retrained_labels.txt\"\n input_height = 224\n input_width = 224\n input_mean = 128\n input_std = 128\n input_layer = \"input\"\n output_layer = \"final_result\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.image:\n file_name = args.image\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height = 
args.input_height\n  if args.input_width:\n    input_width = args.input_width\n  if args.input_mean:\n    input_mean = args.input_mean\n  if args.input_std:\n    input_std = args.input_std\n  if args.input_layer:\n    input_layer = args.input_layer\n  if args.output_layer:\n    output_layer = args.output_layer\n\n  graph = load_graph(model_file)\n  t = read_tensor_from_image_file(file_name,\n                                  input_height=input_height,\n                                  input_width=input_width,\n                                  input_mean=input_mean,\n                                  input_std=input_std)\n\n  input_name = \"import/\" + input_layer\n  output_name = \"import/\" + output_layer\n  input_operation = graph.get_operation_by_name(input_name)\n  output_operation = graph.get_operation_by_name(output_name)\n\n  with tf.Session(graph=graph) as sess:\n    results = sess.run(output_operation.outputs[0],\n                      {input_operation.outputs[0]: t})\n  results = np.squeeze(results)\n\n  top_k = results.argsort()[-5:][::-1]\n  labels = load_labels(label_file)\n  for i in top_k:\n    print(labels[i], results[i])\n
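\n# Example invocation (paths shown are the script's own defaults, for illustration):\n# python label_image.py --graph=tf_files/retrained_graph.pb \\\n#     --labels=tf_files/retrained_labels.txt \\\n#     --image=tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\n"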
] | [
[
"tensorflow.Graph",
"tensorflow.image.resize_bilinear",
"tensorflow.import_graph_def",
"tensorflow.read_file",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.image.decode_png",
"tensorflow.expand_dims",
"tensorflow.image.decode_bmp",
"tensorflow.subtract",
"tensorflow.image.decode_gif",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
shijiale0609/Python_Data_Analysis | [
"c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e"
] | [
"PythonDAdata/3358OS_06_Code/code6/pd_plotting.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndf = pd.read_csv('transcount.csv')\ndf = df.groupby('year').aggregate(np.mean)\n\ngpu = pd.read_csv('gpu_transcount.csv')\ngpu = gpu.groupby('year').aggregate(np.mean)\n\n# Align the per-year CPU and GPU transistor counts on the year index,\n# keeping years present in either frame; missing values become zero below.\ndf = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)\ndf = df.replace(np.nan, 0)\ndf.plot()\ndf.plot(logy=True)\ndf[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)\nplt.show()\n
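\n# Optional sketch (hypothetical file name): persist the log-scale plot\n# instead of only displaying it.\n# ax = df.plot(logy=True)\n# ax.get_figure().savefig('transcount_log.png')\n"
] | [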
[
"pandas.merge",
"pandas.read_csv",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
RobotJud2019/atmt-1 | [
"3966ec45b12a8512e8629266ccc3bcf5ece8bb3c"
] | [
"translate_beam.py"
] | [
"import os\nimport logging\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.serialization import default_restore_location\n\nfrom seq2seq import models, utils\nfrom seq2seq.data.dictionary import Dictionary\nfrom seq2seq.data.dataset import Seq2SeqDataset, BatchSampler\nfrom seq2seq.beam import BeamSearch, BeamSearchNode\n\n\ndef get_args():\n \"\"\" Defines generation-specific hyper-parameters. \"\"\"\n parser = argparse.ArgumentParser('Sequence to Sequence Model')\n parser.add_argument('--cuda', default=False, help='Use a GPU')\n parser.add_argument('--seed', default=42, type=int, help='pseudo random number generator seed')\n\n # Add data arguments\n parser.add_argument('--data', default='data_asg4/prepared_data', help='path to data directory')\n parser.add_argument('--checkpoint-path', default='checkpoints_asg4/checkpoint_best.pt', help='path to the model file')\n parser.add_argument('--batch-size', default=None, type=int, help='maximum number of sentences in a batch')\n parser.add_argument('--output', default='model_translations.txt', type=str,\n help='path to the output file destination')\n parser.add_argument('--max-len', default=100, type=int, help='maximum length of generated sequence')\n\n # Add beam search arguments\n parser.add_argument('--beam-size', default=5, type=int, help='number of hypotheses expanded in beam search')\n\n return parser.parse_args()\n\n\ndef main(args):\n \"\"\" Main translation function' \"\"\"\n\n output_sentences_div = []\n osd_idx = 0\n best_N = 3\n n_sents = 0\n\n # Load arguments from checkpoint\n torch.manual_seed(args.seed)\n state_dict = torch.load(args.checkpoint_path, map_location=lambda s, l: default_restore_location(s, 'cpu'))\n args_loaded = argparse.Namespace(**{**vars(args), **vars(state_dict['args'])})\n args_loaded.data = args.data\n args = args_loaded\n utils.init_logging(args)\n\n # Load dictionaries\n src_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.source_lang)))\n logging.info('Loaded a source dictionary ({:s}) with {:d} words'.format(args.source_lang, len(src_dict)))\n tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{:s}'.format(args.target_lang)))\n logging.info('Loaded a target dictionary ({:s}) with {:d} words'.format(args.target_lang, len(tgt_dict)))\n\n # Load dataset\n test_dataset = Seq2SeqDataset(\n src_file=os.path.join(args.data, 'test.{:s}'.format(args.source_lang)),\n tgt_file=os.path.join(args.data, 'test.{:s}'.format(args.target_lang)),\n src_dict=src_dict, tgt_dict=tgt_dict)\n\n test_loader = torch.utils.data.DataLoader(test_dataset, num_workers=1, collate_fn=test_dataset.collater,\n batch_sampler=BatchSampler(test_dataset, 9999999,\n args.batch_size, 1, 0, shuffle=False,\n seed=args.seed))\n # Build model and criterion\n model = models.build_model(args, src_dict, tgt_dict)\n if args.cuda:\n model = model.cuda()\n model.eval()\n model.load_state_dict(state_dict['model'])\n logging.info('Loaded a model from checkpoint {:s}'.format(args.checkpoint_path))\n progress_bar = tqdm(test_loader, desc='| Generation', leave=False)\n\n # Iterate over the test set\n all_hyps = {}\n for i, sample in enumerate(progress_bar):\n\n # Create a beam search object or every input sentence in batch\n batch_size = sample['src_tokens'].shape[0]\n searches = [BeamSearch(args.beam_size, args.max_len - 1, tgt_dict.unk_idx) for i in range(batch_size)]\n n_sents += len(searches)\n\n with torch.no_grad():\n # Compute the encoder output\n encoder_out = 
model.encoder(sample['src_tokens'], sample['src_lengths'])\n # __QUESTION 1: What is \"go_slice\" used for and what do its dimensions represent?\n go_slice = \\\n torch.ones(sample['src_tokens'].shape[0], 1).fill_(tgt_dict.eos_idx).type_as(sample['src_tokens'])\n if args.cuda:\n go_slice = utils.move_to_cuda(go_slice)\n\n # Compute the decoder output at the first time step\n decoder_out, _ = model.decoder(go_slice, encoder_out)\n\n # __QUESTION 2: Why do we keep one top candidate more than the beam size?\n log_probs, next_candidates = torch.topk(torch.log(torch.softmax(decoder_out, dim=2)),\n args.beam_size+1, dim=-1)\n\n # Create number of beam_size beam search nodes for every input sentence\n for i in range(batch_size):\n for j in range(args.beam_size):\n best_candidate = next_candidates[i, :, j]\n backoff_candidate = next_candidates[i, :, j+1]\n best_log_p = log_probs[i, :, j]\n backoff_log_p = log_probs[i, :, j+1]\n next_word = torch.where(best_candidate == tgt_dict.unk_idx, backoff_candidate, best_candidate)\n log_p = torch.where(best_candidate == tgt_dict.unk_idx, backoff_log_p, best_log_p)\n log_p = log_p[-1]\n\n # Store the encoder_out information for the current input sentence and beam\n emb = encoder_out['src_embeddings'][:,i,:]\n lstm_out = encoder_out['src_out'][0][:,i,:]\n final_hidden = encoder_out['src_out'][1][:,i,:]\n final_cell = encoder_out['src_out'][2][:,i,:]\n try:\n mask = encoder_out['src_mask'][i,:]\n except TypeError:\n mask = None\n\n node = BeamSearchNode(searches[i], emb, lstm_out, final_hidden, final_cell,\n mask, torch.cat((go_slice[i], next_word)), log_p, 1)\n # __QUESTION 3: Why do we add the node with a negative score?\n searches[i].add(-node.eval(), node)\n\n # Start generating further tokens until max sentence length reached\n for _ in range(args.max_len-1):\n\n # Get the current nodes to expand\n nodes = [n[1] for s in searches for n in s.get_current_beams()]\n if nodes == []:\n break # All beams ended in EOS\n\n # Reconstruct prev_words, encoder_out from current beam search nodes\n prev_words = torch.stack([node.sequence for node in nodes])\n encoder_out[\"src_embeddings\"] = torch.stack([node.emb for node in nodes], dim=1)\n lstm_out = torch.stack([node.lstm_out for node in nodes], dim=1)\n final_hidden = torch.stack([node.final_hidden for node in nodes], dim=1)\n final_cell = torch.stack([node.final_cell for node in nodes], dim=1)\n encoder_out[\"src_out\"] = (lstm_out, final_hidden, final_cell)\n try:\n encoder_out[\"src_mask\"] = torch.stack([node.mask for node in nodes], dim=0)\n except TypeError:\n encoder_out[\"src_mask\"] = None\n\n with torch.no_grad():\n # Compute the decoder output by feeding it the decoded sentence prefix\n decoder_out, _ = model.decoder(prev_words, encoder_out)\n\n # see __QUESTION 2\n log_probs, next_candidates = torch.topk(torch.log(torch.softmax(decoder_out, dim=2)), args.beam_size+1, dim=-1)\n\n # Create number of beam_size next nodes for every current node\n for i in range(log_probs.shape[0]):\n for j in range(args.beam_size):\n\n best_candidate = next_candidates[i, :, j]\n backoff_candidate = next_candidates[i, :, j+1]\n best_log_p = log_probs[i, :, j]\n backoff_log_p = log_probs[i, :, j+1]\n next_word = torch.where(best_candidate == tgt_dict.unk_idx, backoff_candidate, best_candidate)\n log_p = torch.where(best_candidate == tgt_dict.unk_idx, backoff_log_p, best_log_p)\n log_p = log_p[-1]\n next_word = torch.cat((prev_words[i][1:], next_word[-1:]))\n\n # Get parent node and beam search object for corresponding 
sentence\n                    node = nodes[i]\n                    search = node.search\n\n                    # __QUESTION 4: How are \"add\" and \"add_final\" different? What would happen if we did not make this distinction?\n\n                    # Store the node as final if EOS is generated\n                    if next_word[-1] == tgt_dict.eos_idx:\n                        node = BeamSearchNode(search, node.emb, node.lstm_out, node.final_hidden,\n                                              node.final_cell, node.mask, torch.cat((prev_words[i][0].view([1]),\n                                              next_word)), node.logp, node.length)\n                        search.add_final(-node.eval(), node)\n\n                    # Add the node to current nodes for next iteration\n                    else:\n                        node = BeamSearchNode(search, node.emb, node.lstm_out, node.final_hidden,\n                                              node.final_cell, node.mask, torch.cat((prev_words[i][0].view([1]),\n                                              next_word)), node.logp + log_p, node.length + 1)\n                        search.add(-node.eval(), node)\n\n            # __QUESTION 5: What happens internally when we prune our beams?\n            # How do we know we always maintain the best sequences?\n            for search in searches:\n                search.prune()\n\n        # Segment into sentences\n\n        BestN = []\n        for search in searches:\n            Nnodes = search.get_bestN(best_N)\n            for n, p in enumerate(Nnodes):\n                BestN.append(p[1].sequence[1:].cpu())\n\n        best_sents2 = torch.stack(BestN)\n\n        decoded_batch = best_sents2.numpy()\n\n        output_sentences = [decoded_batch[row, :] for row in range(decoded_batch.shape[0])]\n\n        # __QUESTION 6: What is the purpose of this for loop?\n        temp = list()\n        for sent in output_sentences:\n            first_eos = np.where(sent == tgt_dict.eos_idx)[0]\n            if len(first_eos) > 0:\n                temp.append(sent[:first_eos[0]])\n            else:\n                temp.append(sent)\n        output_sentences = temp\n\n        # Convert arrays of indices into strings of words\n        output_sentences = [tgt_dict.string(sent) for sent in output_sentences]\n\n        # need dictionary\n        for ii, sent in enumerate(output_sentences):\n            output_sentences_div.append(output_sentences[ii])\n            osd_idx += 1\n            if ii % 3 > 0:\n                continue\n            all_hyps[int(sample['id'].data[ii // 3])] = sent\n\n    # Write to file\n    if args.output is not None:\n        with open(args.output, 'w') as out_file:\n            for sent_id in range(len(all_hyps.keys())):\n                if sent_id not in all_hyps:\n                    out_file.write(\"sent_id \" + str(sent_id) + \" not in dictionary\" + '\\n')\n                    continue\n                out_file.write(all_hyps[sent_id] + '\\n')\n            if best_N * n_sents > osd_idx:\n                for jjj in range(n_sents - osd_idx // best_N):\n                    out_file.write('dummy sentence ' + str(jjj) + '\\n')\n\n    if args.output is not None:\n        with open('raw_' + args.output, 'w') as out_file:\n            for ll in range(osd_idx):\n                out_file.write(output_sentences_div[ll] + '\\n')\n            if best_N * n_sents > osd_idx:\n                for jjj in range(n_sents - osd_idx):\n                    out_file.write('dummy sentence ' + str(jjj) + '\\n')\n\n\nif __name__ == '__main__':\n    args = get_args()\n    main(args)\n
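\n# Example invocation (arguments shown are the script's own defaults, for illustration):\n# python translate_beam.py --data data_asg4/prepared_data \\\n#     --checkpoint-path checkpoints_asg4/checkpoint_best.pt \\\n#     --beam-size 5 --output model_translations.txt\n"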
] | [
[
"torch.softmax",
"torch.ones",
"torch.cat",
"torch.manual_seed",
"torch.serialization.default_restore_location",
"torch.no_grad",
"torch.where",
"torch.stack",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
suryaabhi/ray | [
"112ef075632c0815beb9838b91a83331fe649f0b"
] | [
"python/ray/worker.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport cloudpickle as pickle\nimport collections\nimport colorama\nimport copy\nimport hashlib\nimport inspect\nimport json\nimport numpy as np\nimport os\nimport redis\nimport signal\nimport sys\nimport threading\nimport time\nimport traceback\n\n# Ray modules\nimport pyarrow\nimport pyarrow.plasma as plasma\nimport ray.experimental.state as state\nimport ray.serialization as serialization\nimport ray.services as services\nimport ray.signature as signature\nimport ray.local_scheduler\nimport ray.plasma\nfrom ray.utils import (FunctionProperties, random_string, binary_to_hex,\n is_cython)\n\nSCRIPT_MODE = 0\nWORKER_MODE = 1\nPYTHON_MODE = 2\nSILENT_MODE = 3\n\nLOG_POINT = 0\nLOG_SPAN_START = 1\nLOG_SPAN_END = 2\n\nERROR_KEY_PREFIX = b\"Error:\"\nDRIVER_ID_LENGTH = 20\nERROR_ID_LENGTH = 20\n\n# This must match the definition of NIL_ACTOR_ID in task.h.\nNIL_ID = 20 * b\"\\xff\"\nNIL_LOCAL_SCHEDULER_ID = NIL_ID\nNIL_FUNCTION_ID = NIL_ID\nNIL_ACTOR_ID = NIL_ID\n\n# This must be kept in sync with the `error_types` array in\n# common/state/error_table.h.\nOBJECT_HASH_MISMATCH_ERROR_TYPE = b\"object_hash_mismatch\"\nPUT_RECONSTRUCTION_ERROR_TYPE = b\"put_reconstruction\"\n\n# This must be kept in sync with the `scheduling_state` enum in common/task.h.\nTASK_STATUS_RUNNING = 8\n\n\nclass FunctionID(object):\n def __init__(self, function_id):\n self.function_id = function_id\n\n def id(self):\n return self.function_id\n\n\nclass RayTaskError(Exception):\n \"\"\"An object used internally to represent a task that threw an exception.\n\n If a task throws an exception during execution, a RayTaskError is stored in\n the object store for each of the task's outputs. 
When an object is\n retrieved from the object store, the Python method that retrieved it checks\n to see if the object is a RayTaskError and if it is then an exception is\n thrown propagating the error message.\n\n Currently, we either use the exception attribute or the traceback attribute\n but not both.\n\n Attributes:\n function_name (str): The name of the function that failed and produced\n the RayTaskError.\n exception (Exception): The exception object thrown by the failed task.\n traceback_str (str): The traceback from the exception.\n \"\"\"\n\n def __init__(self, function_name, exception, traceback_str):\n \"\"\"Initialize a RayTaskError.\"\"\"\n self.function_name = function_name\n if (isinstance(exception, RayGetError) or\n isinstance(exception, RayGetArgumentError)):\n self.exception = exception\n else:\n self.exception = None\n self.traceback_str = traceback_str\n\n def __str__(self):\n \"\"\"Format a RayTaskError as a string.\"\"\"\n if self.traceback_str is None:\n # This path is taken if getting the task arguments failed.\n return (\"Remote function {}{}{} failed with:\\n\\n{}\"\n .format(colorama.Fore.RED, self.function_name,\n colorama.Fore.RESET, self.exception))\n else:\n # This path is taken if the task execution failed.\n return (\"Remote function {}{}{} failed with:\\n\\n{}\"\n .format(colorama.Fore.RED, self.function_name,\n colorama.Fore.RESET, self.traceback_str))\n\n\nclass RayGetError(Exception):\n \"\"\"An exception used when get is called on an output of a failed task.\n\n Attributes:\n objectid (lib.ObjectID): The ObjectID that get was called on.\n task_error (RayTaskError): The RayTaskError object created by the\n failed task.\n \"\"\"\n\n def __init__(self, objectid, task_error):\n \"\"\"Initialize a RayGetError object.\"\"\"\n self.objectid = objectid\n self.task_error = task_error\n\n def __str__(self):\n \"\"\"Format a RayGetError as a string.\"\"\"\n return (\"Could not get objectid {}. It was created by remote function \"\n \"{}{}{} which failed with:\\n\\n{}\"\n .format(self.objectid, colorama.Fore.RED,\n self.task_error.function_name, colorama.Fore.RESET,\n self.task_error))\n\n\nclass RayGetArgumentError(Exception):\n \"\"\"An exception used when a task's argument was produced by a failed task.\n\n Attributes:\n argument_index (int): The index (zero indexed) of the failed argument\n in present task's remote function call.\n function_name (str): The name of the function for the current task.\n objectid (lib.ObjectID): The ObjectID that was passed in as the\n argument.\n task_error (RayTaskError): The RayTaskError object created by the\n failed task.\n \"\"\"\n\n def __init__(self, function_name, argument_index, objectid, task_error):\n \"\"\"Initialize a RayGetArgumentError object.\"\"\"\n self.argument_index = argument_index\n self.function_name = function_name\n self.objectid = objectid\n self.task_error = task_error\n\n def __str__(self):\n \"\"\"Format a RayGetArgumentError as a string.\"\"\"\n return (\"Failed to get objectid {} as argument {} for remote function \"\n \"{}{}{}. It was created by remote function {}{}{} which \"\n \"failed with:\\n{}\".format(self.objectid, self.argument_index,\n colorama.Fore.RED,\n self.function_name,\n colorama.Fore.RESET,\n colorama.Fore.RED,\n self.task_error.function_name,\n colorama.Fore.RESET,\n self.task_error))\n\n\nclass Worker(object):\n \"\"\"A class used to define the control flow of a worker process.\n\n Note:\n The methods in this class are considered unexposed to the user. 
The\n functions outside of this class are considered exposed.\n\n Attributes:\n functions (Dict[str, Callable]): A dictionary mapping the name of a\n remote function to the remote function itself. This is the set of\n remote functions that can be executed by this worker.\n connected (bool): True if Ray has been started and False otherwise.\n mode: The mode of the worker. One of SCRIPT_MODE, PYTHON_MODE,\n SILENT_MODE, and WORKER_MODE.\n cached_remote_functions_and_actors: A list of information for exporting\n remote functions and actor classes definitions that were defined\n before the worker called connect. When the worker eventually does\n call connect, if it is a driver, it will export these functions and\n actors. If cached_remote_functions_and_actors is None, that means\n that connect has been called already.\n cached_functions_to_run (List): A list of functions to run on all of\n the workers that should be exported as soon as connect is called.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a Worker object.\"\"\"\n # The functions field is a dictionary that maps a driver ID to a\n # dictionary of functions that have been registered for that driver\n # (this inner dictionary maps function IDs to a tuple of the function\n # name and the function itself). This should only be used on workers\n # that execute remote functions.\n self.functions = collections.defaultdict(lambda: {})\n # The function_properties field is a dictionary that maps a driver ID\n # to a dictionary of functions that have been registered for that\n # driver (this inner dictionary maps function IDs to a tuple of the\n # number of values returned by that function, the number of CPUs\n # required by that function, and the number of GPUs required by that\n # function). This is used when submitting a function (which can be done\n # both on workers and on drivers).\n self.function_properties = collections.defaultdict(lambda: {})\n # This is a dictionary mapping driver ID to a dictionary that maps\n # remote function IDs for that driver to a counter of the number of\n # times that remote function has been executed on this worker. The\n # counter is incremented every time the function is executed on this\n # worker. When the counter reaches the maximum number of executions\n # allowed for a particular function, the worker is killed.\n self.num_task_executions = collections.defaultdict(lambda: {})\n self.connected = False\n self.mode = None\n self.cached_remote_functions_and_actors = []\n self.cached_functions_to_run = []\n self.fetch_and_register_actor = None\n self.make_actor = None\n self.actors = {}\n self.actor_task_counter = 0\n # Whether an actor instance has been loaded yet. The actor counts as\n # loaded once it has either executed its first task or successfully\n # resumed from a checkpoint.\n self.actor_loaded = False\n # This field is used to report actor checkpoint failure for the last\n # task assigned. Workers are not assigned a task on startup, so we\n # initialize to False.\n self.actor_checkpoint_failed = False\n # TODO(swang): This is a hack to prevent the object store from evicting\n # dummy objects. 
Once we allow object pinning in the store, we may\n # remove this variable.\n self.actor_pinned_objects = None\n # The number of threads Plasma should use when putting an object in the\n # object store.\n self.memcopy_threads = 12\n\n def set_mode(self, mode):\n \"\"\"Set the mode of the worker.\n\n The mode SCRIPT_MODE should be used if this Worker is a driver that is\n being run as a Python script or interactively in a shell. It will print\n information about task failures.\n\n The mode WORKER_MODE should be used if this Worker is not a driver. It\n will not print information about tasks.\n\n The mode PYTHON_MODE should be used if this Worker is a driver and if\n you want to run the driver in a manner equivalent to serial Python for\n debugging purposes. It will not send remote function calls to the\n scheduler and will insead execute them in a blocking fashion.\n\n The mode SILENT_MODE should be used only during testing. It does not\n print any information about errors because some of the tests\n intentionally fail.\n\n args:\n mode: One of SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, and\n SILENT_MODE.\n \"\"\"\n self.mode = mode\n\n def store_and_register(self, object_id, value, depth=100):\n \"\"\"Store an object and attempt to register its class if needed.\n\n Args:\n object_id: The ID of the object to store.\n value: The value to put in the object store.\n depth: The maximum number of classes to recursively register.\n\n Raises:\n Exception: An exception is raised if the attempt to store the\n object fails. This can happen if there is already an object\n with the same ID in the object store or if the object store is\n full.\n \"\"\"\n counter = 0\n while True:\n if counter == depth:\n raise Exception(\"Ray exceeded the maximum number of classes \"\n \"that it will recursively serialize when \"\n \"attempting to serialize an object of \"\n \"type {}.\".format(type(value)))\n counter += 1\n try:\n self.plasma_client.put(\n value,\n object_id=pyarrow.plasma.ObjectID(object_id.id()),\n memcopy_threads=self.memcopy_threads,\n serialization_context=self.serialization_context)\n break\n except pyarrow.SerializationCallbackError as e:\n try:\n register_custom_serializer(type(e.example_object),\n use_dict=True)\n warning_message = (\"WARNING: Serializing objects of type \"\n \"{} by expanding them as dictionaries \"\n \"of their fields. This behavior may \"\n \"be incorrect in some cases.\"\n .format(type(e.example_object)))\n print(warning_message)\n except (serialization.RayNotDictionarySerializable,\n serialization.CloudPickleError,\n pickle.pickle.PicklingError,\n Exception):\n # We also handle generic exceptions here because\n # cloudpickle can fail with many different types of errors.\n try:\n register_custom_serializer(type(e.example_object),\n use_pickle=True)\n warning_message = (\"WARNING: Falling back to \"\n \"serializing objects of type {} by \"\n \"using pickle. 
This may be \"\n \"inefficient.\"\n .format(type(e.example_object)))\n print(warning_message)\n except serialization.CloudPickleError:\n register_custom_serializer(type(e.example_object),\n use_pickle=True,\n local=True)\n warning_message = (\"WARNING: Pickling the class {} \"\n \"failed, so we are using pickle \"\n \"and only registering the class \"\n \"locally.\"\n .format(type(e.example_object)))\n print(warning_message)\n\n def put_object(self, object_id, value):\n \"\"\"Put value in the local object store with object id objectid.\n\n This assumes that the value for objectid has not yet been placed in the\n local object store.\n\n Args:\n object_id (object_id.ObjectID): The object ID of the value to be\n put.\n value: The value to put in the object store.\n\n Raises:\n Exception: An exception is raised if the attempt to store the\n object fails. This can happen if there is already an object\n with the same ID in the object store or if the object store is\n full.\n \"\"\"\n # Make sure that the value is not an object ID.\n if isinstance(value, ray.local_scheduler.ObjectID):\n raise Exception(\"Calling 'put' on an ObjectID is not allowed \"\n \"(similarly, returning an ObjectID from a remote \"\n \"function is not allowed). If you really want to \"\n \"do this, you can wrap the ObjectID in a list and \"\n \"call 'put' on it (or return it).\")\n\n if isinstance(value, ray.actor.ActorHandleParent):\n raise Exception(\"Calling 'put' on an actor handle is currently \"\n \"not allowed (similarly, returning an actor \"\n \"handle from a remote function is not allowed).\")\n\n # Serialize and put the object in the object store.\n try:\n self.store_and_register(object_id, value)\n except pyarrow.PlasmaObjectExists as e:\n # The object already exists in the object store, so there is no\n # need to add it again. TODO(rkn): We need to compare the hashes\n # and make sure that the objects are in fact the same. 
We also\n # should return an error code to the caller instead of printing a\n # message.\n print(\"This object already exists in the object store.\")\n\n def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):\n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n try:\n # We divide very large get requests into smaller get requests\n # so that a single get request doesn't block the store for a\n # long time, if the store is blocked, it can block the manager\n # as well as a consequence.\n results = []\n for i in range(0, len(object_ids),\n ray._config.worker_get_request_size()):\n results += self.plasma_client.get(\n object_ids[i:(i +\n ray._config.worker_get_request_size())],\n timeout,\n self.serialization_context)\n return results\n except pyarrow.lib.ArrowInvalid as e:\n # TODO(ekl): the local scheduler could include relevant\n # metadata in the task kill case for a better error message\n invalid_error = RayTaskError(\n \"<unknown>\", None,\n \"Invalid return value: likely worker died or was killed \"\n \"while executing the task.\")\n return [invalid_error] * len(object_ids)\n except pyarrow.DeserializationCallbackError as e:\n # Wait a little bit for the import thread to import the class.\n # If we currently have the worker lock, we need to release it\n # so that the import thread can acquire it.\n if self.mode == WORKER_MODE:\n self.lock.release()\n time.sleep(0.01)\n if self.mode == WORKER_MODE:\n self.lock.acquire()\n\n if time.time() - start_time > error_timeout:\n warning_message = (\"This worker or driver is waiting to \"\n \"receive a class definition so that it \"\n \"can deserialize an object from the \"\n \"object store. This may be fine, or it \"\n \"may be a bug.\")\n if not warning_sent:\n ray.utils.push_error_to_driver(\n self.redis_client, \"wait_for_class\",\n warning_message,\n driver_id=self.task_driver_id.id())\n warning_sent = True\n\n def get_object(self, object_ids):\n \"\"\"Get the value or values in the object store associated with the IDs.\n\n Return the values from the local object store for object_ids. This will\n block until all the values for object_ids have been written to the\n local object store.\n\n Args:\n object_ids (List[object_id.ObjectID]): A list of the object IDs\n whose values should be retrieved.\n \"\"\"\n # Make sure that the values are object IDs.\n for object_id in object_ids:\n if not isinstance(object_id, ray.local_scheduler.ObjectID):\n raise Exception(\"Attempting to call `get` on the value {}, \"\n \"which is not an ObjectID.\".format(object_id))\n # Do an initial fetch for remote objects. We divide the fetch into\n # smaller fetches so as to not block the manager for a prolonged period\n # of time in a single call.\n plain_object_ids = [plasma.ObjectID(object_id.id())\n for object_id in object_ids]\n for i in range(0, len(object_ids),\n ray._config.worker_fetch_request_size()):\n self.plasma_client.fetch(\n plain_object_ids[i:(i +\n ray._config.worker_fetch_request_size())])\n\n # Get the objects. We initially try to get the objects immediately.\n final_results = self.retrieve_and_deserialize(plain_object_ids, 0)\n # Construct a dictionary mapping object IDs that we haven't gotten yet\n # to their original index in the object_ids argument.\n unready_ids = dict((plain_object_ids[i].binary(), i) for (i, val) in\n enumerate(final_results)\n if val is plasma.ObjectNotAvailable)\n was_blocked = (len(unready_ids) > 0)\n # Try reconstructing any objects we haven't gotten yet. 
Try to get them\n # until at least get_timeout_milliseconds milliseconds passes, then\n # repeat.\n while len(unready_ids) > 0:\n for unready_id in unready_ids:\n self.local_scheduler_client.reconstruct_object(unready_id)\n # Do another fetch for objects that aren't available locally yet,\n # in case they were evicted since the last fetch. We divide the\n # fetch into smaller fetches so as to not block the manager for a\n # prolonged period of time in a single call.\n object_ids_to_fetch = list(map(\n plasma.ObjectID, unready_ids.keys()))\n for i in range(0, len(object_ids_to_fetch),\n ray._config.worker_fetch_request_size()):\n self.plasma_client.fetch(\n object_ids_to_fetch[i:(\n i + ray._config.worker_fetch_request_size())])\n results = self.retrieve_and_deserialize(\n object_ids_to_fetch,\n max([ray._config.get_timeout_milliseconds(),\n int(0.01 * len(unready_ids))]))\n # Remove any entries for objects we received during this iteration\n # so we don't retrieve the same object twice.\n for i, val in enumerate(results):\n if val is not plasma.ObjectNotAvailable:\n object_id = object_ids_to_fetch[i].binary()\n index = unready_ids[object_id]\n final_results[index] = val\n unready_ids.pop(object_id)\n\n # If there were objects that we weren't able to get locally, let the\n # local scheduler know that we're now unblocked.\n if was_blocked:\n self.local_scheduler_client.notify_unblocked()\n\n assert len(final_results) == len(object_ids)\n return final_results\n\n def submit_task(self, function_id, args, actor_id=None,\n actor_handle_id=None, actor_counter=0,\n is_actor_checkpoint_method=False,\n execution_dependencies=None):\n \"\"\"Submit a remote task to the scheduler.\n\n Tell the scheduler to schedule the execution of the function with ID\n function_id with arguments args. Retrieve object IDs for the outputs of\n the function from the scheduler and immediately return them.\n\n Args:\n function_id: The ID of the function to execute.\n args: The arguments to pass into the function. Arguments can be\n object IDs or they can be values. 
If they are values, they must\n be serializable objecs.\n actor_id: The ID of the actor that this task is for.\n actor_counter: The counter of the actor task.\n is_actor_checkpoint_method: True if this is an actor checkpoint\n task and false otherwise.\n \"\"\"\n with log_span(\"ray:submit_task\", worker=self):\n check_main_thread()\n if actor_id is None:\n assert actor_handle_id is None\n actor_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)\n actor_handle_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)\n else:\n assert actor_handle_id is not None\n # Put large or complex arguments that are passed by value in the\n # object store first.\n args_for_local_scheduler = []\n for arg in args:\n if isinstance(arg, ray.local_scheduler.ObjectID):\n args_for_local_scheduler.append(arg)\n elif isinstance(arg, ray.actor.ActorHandleParent):\n args_for_local_scheduler.append(put(\n ray.actor.wrap_actor_handle(arg)))\n elif ray.local_scheduler.check_simple_value(arg):\n args_for_local_scheduler.append(arg)\n else:\n args_for_local_scheduler.append(put(arg))\n\n # By default, there are no execution dependencies.\n if execution_dependencies is None:\n execution_dependencies = []\n\n # Look up the various function properties.\n function_properties = self.function_properties[\n self.task_driver_id.id()][function_id.id()]\n\n # Submit the task to local scheduler.\n task = ray.local_scheduler.Task(\n self.task_driver_id,\n ray.local_scheduler.ObjectID(function_id.id()),\n args_for_local_scheduler,\n function_properties.num_return_vals,\n self.current_task_id,\n self.task_index,\n actor_id,\n actor_handle_id,\n actor_counter,\n is_actor_checkpoint_method,\n execution_dependencies,\n function_properties.resources)\n # Increment the worker's task index to track how many tasks have\n # been submitted by the current task so far.\n self.task_index += 1\n self.local_scheduler_client.submit(task)\n\n return task.returns()\n\n def run_function_on_all_workers(self, function):\n \"\"\"Run arbitrary code on all of the workers.\n\n This function will first be run on the driver, and then it will be\n exported to all of the workers to be run. It will also be run on any\n new workers that register later. If ray.init has not been called yet,\n then cache the function and export it later.\n\n Args:\n function (Callable): The function to run on all of the workers. It\n should not take any arguments. If it returns anything, its\n return values will not be used.\n \"\"\"\n check_main_thread()\n # If ray.init has not been called yet, then cache the function and\n # export it when connect is called. Otherwise, run the function on all\n # workers.\n if self.mode is None:\n self.cached_functions_to_run.append(function)\n else:\n # Attempt to pickle the function before we need it. 
This could\n # fail, and it is more convenient if the failure happens before we\n # actually run the function locally.\n pickled_function = pickle.dumps(function)\n\n function_to_run_id = hashlib.sha1(pickled_function).digest()\n key = b\"FunctionsToRun:\" + function_to_run_id\n # First run the function on the driver.\n # We always run the task locally.\n function({\"worker\": self})\n # Check if the function has already been put into redis.\n function_exported = self.redis_client.setnx(b\"Lock:\" + key, 1)\n if not function_exported:\n # In this case, the function has already been exported, so\n # we don't need to export it again.\n return\n # Run the function on all workers.\n self.redis_client.hmset(key,\n {\"driver_id\": self.task_driver_id.id(),\n \"function_id\": function_to_run_id,\n \"function\": pickled_function})\n self.redis_client.rpush(\"Exports\", key)\n # TODO(rkn): If the worker fails after it calls setnx and before it\n # successfully completes the hmset and rpush, then the program will\n # most likely hang. This could be fixed by making these three\n # operations into a transaction (or by implementing a custom\n # command that does all three things).\n\n def _wait_for_function(self, function_id, driver_id, timeout=10):\n \"\"\"Wait until the function to be executed is present on this worker.\n\n This method will simply loop until the import thread has imported the\n relevant function. If we spend too long in this loop, that may indicate\n a problem somewhere and we will push an error message to the user.\n\n If this worker is an actor, then this will wait until the actor has\n been defined.\n\n Args:\n is_actor (bool): True if this worker is an actor, and false\n otherwise.\n function_id (str): The ID of the function that we want to execute.\n driver_id (str): The ID of the driver to push the error message to\n if this times out.\n \"\"\"\n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n with self.lock:\n if (self.actor_id == NIL_ACTOR_ID and\n (function_id.id() in self.functions[driver_id])):\n break\n elif self.actor_id != NIL_ACTOR_ID and (self.actor_id in\n self.actors):\n break\n if time.time() - start_time > timeout:\n warning_message = (\"This worker was asked to execute a \"\n \"function that it does not have \"\n \"registered. You may have to restart \"\n \"Ray.\")\n if not warning_sent:\n ray.utils.push_error_to_driver(self.redis_client,\n \"wait_for_function\",\n warning_message,\n driver_id=driver_id)\n warning_sent = True\n time.sleep(0.001)\n\n def _get_arguments_for_execution(self, function_name, serialized_args):\n \"\"\"Retrieve the arguments for the remote function.\n\n This retrieves the values for the arguments to the remote function that\n were passed in as object IDs. Argumens that were passed by value are\n not changed. This is called by the worker that is executing the remote\n function.\n\n Args:\n function_name (str): The name of the remote function whose\n arguments are being retrieved.\n serialized_args (List): The arguments to the function. 
These are\n either strings representing serialized objects passed by value\n or they are ObjectIDs.\n\n Returns:\n The retrieved arguments in addition to the arguments that were\n passed by value.\n\n Raises:\n RayGetArgumentError: This exception is raised if a task that\n created one of the arguments failed.\n \"\"\"\n arguments = []\n for (i, arg) in enumerate(serialized_args):\n if isinstance(arg, ray.local_scheduler.ObjectID):\n # get the object from the local object store\n argument = self.get_object([arg])[0]\n if isinstance(argument, RayTaskError):\n # If the result is a RayTaskError, then the task that\n # created this object failed, and we should propagate the\n # error message here.\n raise RayGetArgumentError(function_name, i, arg, argument)\n elif isinstance(argument, ray.actor.ActorHandleWrapper):\n argument = ray.actor.unwrap_actor_handle(self, argument)\n else:\n # pass the argument by value\n argument = arg\n\n arguments.append(argument)\n return arguments\n\n def _store_outputs_in_objstore(self, objectids, outputs):\n \"\"\"Store the outputs of a remote function in the local object store.\n\n This stores the values that were returned by a remote function in the\n local object store. If any of the return values are object IDs, then\n these object IDs are aliased with the object IDs that the scheduler\n assigned for the return values. This is called by the worker that\n executes the remote function.\n\n Note:\n The arguments objectids and outputs should have the same length.\n\n Args:\n objectids (List[ObjectID]): The object IDs that were assigned to\n the outputs of the remote function call.\n outputs (Tuple): The value returned by the remote function. If the\n remote function was supposed to only return one value, then its\n output was wrapped in a tuple with one element prior to being\n passed into this function.\n \"\"\"\n for i in range(len(objectids)):\n self.put_object(objectids[i], outputs[i])\n\n def _process_task(self, task):\n \"\"\"Execute a task assigned to this worker.\n\n This method deserializes a task from the scheduler, and attempts to\n execute the task. If the task succeeds, the outputs are stored in the\n local object store. If the task throws an exception, RayTaskError\n objects are stored in the object store to represent the failed task\n (these will be retrieved by calls to get or by subsequent tasks that\n use the outputs of this task).\n \"\"\"\n # The ID of the driver that this task belongs to. 
This is needed so\n # that if the task throws an exception, we propagate the error\n # message to the correct driver.\n self.task_driver_id = task.driver_id()\n self.current_task_id = task.task_id()\n self.current_function_id = task.function_id().id()\n self.task_index = 0\n self.put_index = 0\n function_id = task.function_id()\n args = task.arguments()\n return_object_ids = task.returns()\n if task.actor_id().id() != NIL_ACTOR_ID:\n dummy_return_id = return_object_ids.pop()\n function_name, function_executor = (self.functions\n [self.task_driver_id.id()]\n [function_id.id()])\n\n # Get task arguments from the object store.\n try:\n with log_span(\"ray:task:get_arguments\", worker=self):\n arguments = self._get_arguments_for_execution(function_name,\n args)\n except (RayGetError, RayGetArgumentError) as e:\n self._handle_process_task_failure(function_id, return_object_ids,\n e, None)\n return\n except Exception as e:\n self._handle_process_task_failure(\n function_id, return_object_ids, e,\n format_error_message(traceback.format_exc()))\n return\n\n # Execute the task.\n try:\n with log_span(\"ray:task:execute\", worker=self):\n if task.actor_id().id() == NIL_ACTOR_ID:\n outputs = function_executor.executor(arguments)\n else:\n outputs = function_executor(\n dummy_return_id, task.actor_counter(),\n self.actors[task.actor_id().id()],\n *arguments)\n except Exception as e:\n # Determine whether the exception occured during a task, not an\n # actor method.\n task_exception = task.actor_id().id() == NIL_ACTOR_ID\n traceback_str = format_error_message(traceback.format_exc(),\n task_exception=task_exception)\n self._handle_process_task_failure(function_id, return_object_ids,\n e, traceback_str)\n return\n\n # Store the outputs in the local object store.\n try:\n with log_span(\"ray:task:store_outputs\", worker=self):\n # If this is an actor task, then the last object ID returned by\n # the task is a dummy output, not returned by the function\n # itself. Decrement to get the correct number of return values.\n num_returns = len(return_object_ids)\n if num_returns == 1:\n outputs = (outputs,)\n self._store_outputs_in_objstore(return_object_ids, outputs)\n except Exception as e:\n self._handle_process_task_failure(\n function_id, return_object_ids, e,\n format_error_message(traceback.format_exc()))\n\n def _handle_process_task_failure(self, function_id, return_object_ids,\n error, backtrace):\n function_name, _ = self.functions[\n self.task_driver_id.id()][function_id.id()]\n failure_object = RayTaskError(function_name, error, backtrace)\n failure_objects = [failure_object for _ in\n range(len(return_object_ids))]\n self._store_outputs_in_objstore(return_object_ids, failure_objects)\n # Log the error message.\n ray.utils.push_error_to_driver(self.redis_client,\n \"task\",\n str(failure_object),\n driver_id=self.task_driver_id.id(),\n data={\"function_id\": function_id.id(),\n \"function_name\": function_name})\n\n def _wait_for_and_process_task(self, task):\n \"\"\"Wait for a task to be ready and process the task.\n\n Args:\n task: The task to execute.\n \"\"\"\n function_id = task.function_id()\n # Wait until the function to be executed has actually been registered\n # on this worker. 
We will push warnings to the user if we spend too\n # long in this loop.\n with log_span(\"ray:wait_for_function\", worker=self):\n self._wait_for_function(function_id, task.driver_id().id())\n\n # Execute the task.\n # TODO(rkn): Consider acquiring this lock with a timeout and pushing a\n # warning to the user if we are waiting too long to acquire the lock\n # because that may indicate that the system is hanging, and it'd be\n # good to know where the system is hanging.\n log(event_type=\"ray:acquire_lock\", kind=LOG_SPAN_START, worker=self)\n with self.lock:\n log(event_type=\"ray:acquire_lock\", kind=LOG_SPAN_END,\n worker=self)\n\n function_name, _ = (self.functions[task.driver_id().id()]\n [function_id.id()])\n contents = {\"function_name\": function_name,\n \"task_id\": task.task_id().hex(),\n \"worker_id\": binary_to_hex(self.worker_id)}\n with log_span(\"ray:task\", contents=contents, worker=self):\n self._process_task(task)\n\n # Push all of the log events to the global state store.\n flush_log()\n\n # Increase the task execution counter.\n (self.num_task_executions[task.driver_id().id()]\n [function_id.id()]) += 1\n\n reached_max_executions = (\n self.num_task_executions[task.driver_id().id()]\n [function_id.id()] ==\n self.function_properties[task.driver_id().id()]\n [function_id.id()].max_calls)\n if reached_max_executions:\n ray.worker.global_worker.local_scheduler_client.disconnect()\n os._exit(0)\n\n def _get_next_task_from_local_scheduler(self):\n \"\"\"Get the next task from the local scheduler.\n\n Returns:\n A task from the local scheduler.\n \"\"\"\n with log_span(\"ray:get_task\", worker=self):\n task = self.local_scheduler_client.get_task(\n self.actor_checkpoint_failed)\n # We assume that the task is not a checkpoint, or that if it is,\n # that the task will succeed. 
The checkpoint task executor is\n # responsible for reporting task failure to the local scheduler.\n self.actor_checkpoint_failed = False\n\n # Automatically restrict the GPUs available to this task.\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in ray.get_gpu_ids()])\n\n return task\n\n def main_loop(self):\n \"\"\"The main loop a worker runs to receive and execute tasks.\"\"\"\n\n def exit(signum, frame):\n cleanup(worker=self)\n sys.exit(0)\n\n signal.signal(signal.SIGTERM, exit)\n\n check_main_thread()\n while True:\n task = self._get_next_task_from_local_scheduler()\n self._wait_for_and_process_task(task)\n\n\ndef get_gpu_ids():\n \"\"\"Get the IDs of the GPU that are available to the worker.\n\n Each ID is an integer in the range [0, NUM_GPUS - 1], where NUM_GPUS is the\n number of GPUs that the node has.\n \"\"\"\n if _mode() == PYTHON_MODE:\n raise Exception(\"ray.get_gpu_ids() currently does not work in PYTHON \"\n \"MODE.\")\n return global_worker.local_scheduler_client.gpu_ids()\n\n\ndef _webui_url_helper(client):\n \"\"\"Parsing for getting the url of the web UI.\n\n Args:\n client: A redis client to use to query the primary Redis shard.\n\n Returns:\n The URL of the web UI as a string.\n \"\"\"\n result = client.hmget(\"webui\", \"url\")[0]\n return result.decode(\"ascii\") if result is not None else result\n\n\ndef get_webui_url():\n \"\"\"Get the URL to access the web UI.\n\n Note that the URL does not specify which node the web UI is on.\n\n Returns:\n The URL of the web UI as a string.\n \"\"\"\n if _mode() == PYTHON_MODE:\n raise Exception(\"ray.get_webui_url() currently does not work in \"\n \"PYTHON MODE.\")\n return _webui_url_helper(global_worker.redis_client)\n\n\nglobal_worker = Worker()\n\"\"\"Worker: The global Worker object for this worker process.\n\nWe use a global Worker object to ensure that there is a single worker object\nper worker process.\n\"\"\"\n\nglobal_state = state.GlobalState()\n\n\nclass RayConnectionError(Exception):\n pass\n\n\ndef check_main_thread():\n \"\"\"Check that we are currently on the main thread.\n\n Raises:\n Exception: An exception is raised if this is called on a thread other\n than the main thread.\n \"\"\"\n if threading.current_thread().getName() != \"MainThread\":\n raise Exception(\"The Ray methods are not thread safe and must be \"\n \"called from the main thread. This method was called \"\n \"from thread {}.\"\n .format(threading.current_thread().getName()))\n\n\ndef check_connected(worker=global_worker):\n \"\"\"Check if the worker is connected.\n\n Raises:\n Exception: An exception is raised if the worker is not connected.\n \"\"\"\n if not worker.connected:\n raise RayConnectionError(\"This command cannot be called before Ray \"\n \"has been started. 
You can start Ray with \"\n \"'ray.init()'.\")\n\n\ndef print_failed_task(task_status):\n \"\"\"Print information about failed tasks.\n\n Args:\n task_status (Dict): A dictionary containing the name, operationid, and\n error message for a failed task.\n \"\"\"\n print(\"\"\"\n Error: Task failed\n Function Name: {}\n Task ID: {}\n Error Message: \\n{}\n \"\"\".format(task_status[\"function_name\"], task_status[\"operationid\"],\n task_status[\"error_message\"]))\n\n\ndef error_applies_to_driver(error_key, worker=global_worker):\n \"\"\"Return True if the error is for this driver and false otherwise.\"\"\"\n # TODO(rkn): Should probably check that this is only called on a driver.\n # Check that the error key is formatted as in push_error_to_driver.\n assert len(error_key) == (len(ERROR_KEY_PREFIX) + DRIVER_ID_LENGTH + 1 +\n ERROR_ID_LENGTH), error_key\n # If the driver ID in the error message is a sequence of all zeros, then\n # the message is intended for all drivers.\n generic_driver_id = DRIVER_ID_LENGTH * b\"\\x00\"\n driver_id = error_key[len(ERROR_KEY_PREFIX):(len(ERROR_KEY_PREFIX) +\n DRIVER_ID_LENGTH)]\n return (driver_id == worker.task_driver_id.id() or\n driver_id == generic_driver_id)\n\n\ndef error_info(worker=global_worker):\n \"\"\"Return information about failed tasks.\"\"\"\n check_connected(worker)\n check_main_thread()\n error_keys = worker.redis_client.lrange(\"ErrorKeys\", 0, -1)\n errors = []\n for error_key in error_keys:\n if error_applies_to_driver(error_key, worker=worker):\n error_contents = worker.redis_client.hgetall(error_key)\n # If the error is an object hash mismatch, look up the function\n # name for the nondeterministic task. TODO(rkn): Change this so\n # that we don't have to look up additional information. Ideally all\n # relevant information would already be in error_contents.\n error_type = error_contents[b\"type\"]\n if error_type in [OBJECT_HASH_MISMATCH_ERROR_TYPE,\n PUT_RECONSTRUCTION_ERROR_TYPE]:\n function_id = error_contents[b\"data\"]\n if function_id == NIL_FUNCTION_ID:\n function_name = b\"Driver\"\n else:\n task_driver_id = worker.task_driver_id\n function_name = worker.redis_client.hget(\n (b\"RemoteFunction:\" + task_driver_id.id() +\n b\":\" + function_id),\n \"name\")\n error_contents[b\"data\"] = function_name\n errors.append(error_contents)\n\n return errors\n\n\ndef _initialize_serialization(worker=global_worker):\n \"\"\"Initialize the serialization library.\n\n This defines a custom serializer for object IDs and also tells ray to\n serialize several exception classes that we define for error handling.\n \"\"\"\n worker.serialization_context = pyarrow.SerializationContext()\n pyarrow.register_default_serialization_handlers(\n worker.serialization_context)\n\n # Define a custom serializer and deserializer for handling Object IDs.\n def objectid_custom_serializer(obj):\n return obj.id()\n\n def objectid_custom_deserializer(serialized_obj):\n return ray.local_scheduler.ObjectID(serialized_obj)\n\n worker.serialization_context.register_type(\n ray.local_scheduler.ObjectID, \"ray.ObjectID\", pickle=False,\n custom_serializer=objectid_custom_serializer,\n custom_deserializer=objectid_custom_deserializer)\n\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n # These should only be called on the driver because\n # register_custom_serializer will export the class to all of the\n # workers.\n register_custom_serializer(RayTaskError, use_dict=True)\n register_custom_serializer(RayGetError, use_dict=True)\n 
register_custom_serializer(RayGetArgumentError, use_dict=True)\n # Tell Ray to serialize lambdas with pickle.\n register_custom_serializer(type(lambda: 0), use_pickle=True)\n # Tell Ray to serialize types with pickle.\n register_custom_serializer(type(int), use_pickle=True)\n # Ray can serialize actor handles that have been wrapped.\n register_custom_serializer(ray.actor.ActorHandleWrapper,\n use_dict=True)\n # Tell Ray to serialize FunctionSignatures as dictionaries. This is\n # used when passing around actor handles.\n register_custom_serializer(ray.signature.FunctionSignature,\n use_dict=True)\n\n\ndef get_address_info_from_redis_helper(redis_address, node_ip_address):\n redis_ip_address, redis_port = redis_address.split(\":\")\n # For this command to work, some other client (on the same machine as\n # Redis) must have run \"CONFIG SET protected-mode no\".\n redis_client = redis.StrictRedis(host=redis_ip_address,\n port=int(redis_port))\n # The client table prefix must be kept in sync with the file\n # \"src/common/redis_module/ray_redis_module.cc\" where it is defined.\n REDIS_CLIENT_TABLE_PREFIX = \"CL:\"\n client_keys = redis_client.keys(\"{}*\".format(REDIS_CLIENT_TABLE_PREFIX))\n # Filter to live clients on the same node and do some basic checking.\n plasma_managers = []\n local_schedulers = []\n for key in client_keys:\n info = redis_client.hgetall(key)\n\n # Ignore clients that were deleted.\n deleted = info[b\"deleted\"]\n deleted = bool(int(deleted))\n if deleted:\n continue\n\n assert b\"ray_client_id\" in info\n assert b\"node_ip_address\" in info\n assert b\"client_type\" in info\n if info[b\"node_ip_address\"].decode(\"ascii\") == node_ip_address:\n if info[b\"client_type\"].decode(\"ascii\") == \"plasma_manager\":\n plasma_managers.append(info)\n elif info[b\"client_type\"].decode(\"ascii\") == \"local_scheduler\":\n local_schedulers.append(info)\n # Make sure that we got at least one plasma manager and local scheduler.\n assert len(plasma_managers) >= 1\n assert len(local_schedulers) >= 1\n # Build the address information.\n object_store_addresses = []\n for manager in plasma_managers:\n address = manager[b\"manager_address\"].decode(\"ascii\")\n port = services.get_port(address)\n object_store_addresses.append(\n services.ObjectStoreAddress(\n name=manager[b\"store_socket_name\"].decode(\"ascii\"),\n manager_name=manager[b\"manager_socket_name\"].decode(\"ascii\"),\n manager_port=port))\n scheduler_names = [\n scheduler[b\"local_scheduler_socket_name\"].decode(\"ascii\")\n for scheduler in local_schedulers]\n client_info = {\"node_ip_address\": node_ip_address,\n \"redis_address\": redis_address,\n \"object_store_addresses\": object_store_addresses,\n \"local_scheduler_socket_names\": scheduler_names,\n # Web UI should be running.\n \"webui_url\": _webui_url_helper(redis_client)}\n return client_info\n\n\ndef get_address_info_from_redis(redis_address, node_ip_address, num_retries=5):\n counter = 0\n while True:\n try:\n return get_address_info_from_redis_helper(redis_address,\n node_ip_address)\n except Exception as e:\n if counter == num_retries:\n raise\n # Some of the information may not be in Redis yet, so wait a little\n # bit.\n print(\"Some processes that the driver needs to connect to have \"\n \"not registered with Redis, so retrying. 
Have you run \"\n \"'ray start' on this node?\")\n time.sleep(1)\n counter += 1\n\n\ndef _normalize_resource_arguments(num_cpus, num_gpus, resources,\n num_local_schedulers):\n \"\"\"Stick the CPU and GPU arguments into the resources dictionary.\n\n This also checks that the arguments are well-formed.\n\n Args:\n num_cpus: Either a number of CPUs or a list of numbers of CPUs.\n num_gpus: Either a number of GPUs or a list of numbers of GPUs.\n resources: Either a dictionary of resource mappings or a list of\n dictionaries of resource mappings.\n num_local_schedulers: The number of local schedulers.\n\n Returns:\n A list of dictionaries of resources of length num_local_schedulers.\n \"\"\"\n if resources is None:\n resources = {}\n if not isinstance(num_cpus, list):\n num_cpus = num_local_schedulers * [num_cpus]\n if not isinstance(num_gpus, list):\n num_gpus = num_local_schedulers * [num_gpus]\n if not isinstance(resources, list):\n resources = num_local_schedulers * [resources]\n\n new_resources = [r.copy() for r in resources]\n\n for i in range(num_local_schedulers):\n assert \"CPU\" not in new_resources[i], \"Use the 'num_cpus' argument.\"\n assert \"GPU\" not in new_resources[i], \"Use the 'num_gpus' argument.\"\n if num_cpus[i] is not None:\n new_resources[i][\"CPU\"] = num_cpus[i]\n if num_gpus[i] is not None:\n new_resources[i][\"GPU\"] = num_gpus[i]\n\n return new_resources\n\n\ndef _init(address_info=None,\n start_ray_local=False,\n object_id_seed=None,\n num_workers=None,\n num_local_schedulers=None,\n object_store_memory=None,\n driver_mode=SCRIPT_MODE,\n redirect_output=False,\n start_workers_from_local_scheduler=True,\n num_cpus=None,\n num_gpus=None,\n resources=None,\n num_redis_shards=None,\n redis_max_clients=None,\n plasma_directory=None,\n huge_pages=False,\n include_webui=True):\n \"\"\"Helper method to connect to an existing Ray cluster or start a new one.\n\n This method handles two cases. Either a Ray cluster already exists and we\n just attach this driver to it, or we start all of the processes associated\n with a Ray cluster and attach to the newly started cluster.\n\n Args:\n address_info (dict): A dictionary with address information for\n processes in a partially-started Ray cluster. If\n start_ray_local=True, any processes not in this dictionary will be\n started. If provided, an updated address_info dictionary will be\n returned to include processes that are newly started.\n start_ray_local (bool): If True then this will start any processes not\n already in address_info, including Redis, a global scheduler, local\n scheduler(s), object store(s), and worker(s). It will also kill\n these processes when Python exits. If False, this will attach to an\n existing Ray cluster.\n object_id_seed (int): Used to seed the deterministic generation of\n object IDs. The same value can be used across multiple runs of the\n same job in order to generate the object IDs in a consistent\n manner. However, the same ID should not be used for different jobs.\n num_workers (int): The number of workers to start. This is only\n provided if start_ray_local is True.\n num_local_schedulers (int): The number of local schedulers to start.\n This is only provided if start_ray_local is True.\n object_store_memory: The amount of memory (in bytes) to start the\n object store with.\n driver_mode (bool): The mode in which to start the driver. 
This should\n be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.\n redirect_output (bool): True if stdout and stderr for all the processes\n should be redirected to files and false otherwise.\n start_workers_from_local_scheduler (bool): If this flag is True, then\n start the initial workers from the local scheduler. Else, start\n them from Python. The latter case is for debugging purposes only.\n num_cpus (int): Number of cpus the user wishes all local schedulers to\n be configured with.\n num_gpus (int): Number of gpus the user wishes all local schedulers to\n be configured with. If unspecified, Ray will attempt to autodetect\n the number of GPUs available on the node (note that autodetection\n currently only works for Nvidia GPUs).\n resources: A dictionary mapping resource names to the quantity of that\n resource available.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n redis_max_clients: If provided, attempt to configure Redis with this\n maxclients number.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n include_webui: Boolean flag indicating whether to start the web\n UI, which is a Jupyter notebook.\n\n Returns:\n Address information about the started processes.\n\n Raises:\n Exception: An exception is raised if an inappropriate combination of\n arguments is passed in.\n \"\"\"\n check_main_thread()\n if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:\n raise Exception(\"Driver_mode must be in [ray.SCRIPT_MODE, \"\n \"ray.PYTHON_MODE, ray.SILENT_MODE].\")\n\n # Get addresses of existing services.\n if address_info is None:\n address_info = {}\n else:\n assert isinstance(address_info, dict)\n node_ip_address = address_info.get(\"node_ip_address\")\n redis_address = address_info.get(\"redis_address\")\n\n # Start any services that do not yet exist.\n if driver_mode == PYTHON_MODE:\n # If starting Ray in PYTHON_MODE, don't start any other processes.\n pass\n elif start_ray_local:\n # In this case, we launch a scheduler, a new object store, and some\n # workers, and we connect to them. We do not launch any processes that\n # are already registered in address_info.\n # Use the address 127.0.0.1 in local mode.\n node_ip_address = (\"127.0.0.1\" if node_ip_address is None\n else node_ip_address)\n # Use 1 local scheduler if num_local_schedulers is not provided. If\n # existing local schedulers are provided, use that count as\n # num_local_schedulers.\n local_schedulers = address_info.get(\"local_scheduler_socket_names\", [])\n if num_local_schedulers is None:\n if len(local_schedulers) > 0:\n num_local_schedulers = len(local_schedulers)\n else:\n num_local_schedulers = 1\n # Use 1 additional redis shard if num_redis_shards is not provided.\n num_redis_shards = 1 if num_redis_shards is None else num_redis_shards\n\n # Stick the CPU and GPU resources into the resource dictionary.\n resources = _normalize_resource_arguments(num_cpus, num_gpus,\n resources,\n num_local_schedulers)\n\n # Start the scheduler, object store, and some workers. 
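(services.start_ray_head\n # below launches Redis, the global scheduler, the local scheduler(s),\n # the object store(s), and the workers.) 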
These will be\n # killed by the call to cleanup(), which happens when the Python script\n # exits.\n address_info = services.start_ray_head(\n address_info=address_info,\n node_ip_address=node_ip_address,\n num_workers=num_workers,\n num_local_schedulers=num_local_schedulers,\n object_store_memory=object_store_memory,\n redirect_output=redirect_output,\n start_workers_from_local_scheduler=(\n start_workers_from_local_scheduler),\n resources=resources,\n num_redis_shards=num_redis_shards,\n redis_max_clients=redis_max_clients,\n plasma_directory=plasma_directory,\n huge_pages=huge_pages,\n include_webui=include_webui)\n else:\n if redis_address is None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"redis_address must be provided.\")\n if num_workers is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_workers must not be provided.\")\n if num_local_schedulers is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_local_schedulers must not be provided.\")\n if num_cpus is not None or num_gpus is not None:\n raise Exception(\"When connecting to an existing cluster, num_cpus \"\n \"and num_gpus must not be provided.\")\n if resources is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"resources must not be provided.\")\n if num_redis_shards is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"num_redis_shards must not be provided.\")\n if redis_max_clients is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"redis_max_clients must not be provided.\")\n if object_store_memory is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"object_store_memory must not be provided.\")\n if plasma_directory is not None:\n raise Exception(\"When connecting to an existing cluster, \"\n \"plasma_directory must not be provided.\")\n if huge_pages:\n raise Exception(\"When connecting to an existing cluster, \"\n \"huge_pages must not be provided.\")\n # Get the node IP address if one is not provided.\n if node_ip_address is None:\n node_ip_address = services.get_node_ip_address(redis_address)\n # Get the address info of the processes to connect to from Redis.\n address_info = get_address_info_from_redis(redis_address,\n node_ip_address)\n\n # Connect this driver to Redis, the object store, and the local scheduler.\n # Choose the first object store and local scheduler if there are multiple.\n # The corresponding call to disconnect will happen in the call to cleanup()\n # when the Python script exits.\n if driver_mode == PYTHON_MODE:\n driver_address_info = {}\n else:\n driver_address_info = {\n \"node_ip_address\": node_ip_address,\n \"redis_address\": address_info[\"redis_address\"],\n \"store_socket_name\": (\n address_info[\"object_store_addresses\"][0].name),\n \"manager_socket_name\": (\n address_info[\"object_store_addresses\"][0].manager_name),\n \"local_scheduler_socket_name\": (\n address_info[\"local_scheduler_socket_names\"][0]),\n \"webui_url\": address_info[\"webui_url\"]}\n connect(driver_address_info, object_id_seed=object_id_seed,\n mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)\n return address_info\n\n\ndef init(redis_address=None, node_ip_address=None, object_id_seed=None,\n num_workers=None, driver_mode=SCRIPT_MODE, redirect_output=False,\n num_cpus=None, num_gpus=None, resources=None,\n num_custom_resource=None, num_redis_shards=None,\n redis_max_clients=None, plasma_directory=None,\n 
huge_pages=False, include_webui=True):\n \"\"\"Connect to an existing Ray cluster or start one and connect to it.\n\n This method handles two cases. Either a Ray cluster already exists and we\n just attach this driver to it, or we start all of the processes associated\n with a Ray cluster and attach to the newly started cluster.\n\n Args:\n node_ip_address (str): The IP address of the node that we are on.\n redis_address (str): The address of the Redis server to connect to. If\n this address is not provided, then this command will start Redis, a\n global scheduler, a local scheduler, a plasma store, a plasma\n manager, and some workers. It will also kill these processes when\n Python exits.\n object_id_seed (int): Used to seed the deterministic generation of\n object IDs. The same value can be used across multiple runs of the\n same job in order to generate the object IDs in a consistent\n manner. However, the same ID should not be used for different jobs.\n num_workers (int): The number of workers to start. This is only\n provided if redis_address is not provided.\n driver_mode (bool): The mode in which to start the driver. This should\n be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.\n redirect_output (bool): True if stdout and stderr for all the processes\n should be redirected to files and false otherwise.\n num_cpus (int): Number of cpus the user wishes all local schedulers to\n be configured with.\n num_gpus (int): Number of gpus the user wishes all local schedulers to\n be configured with.\n resources: A dictionary mapping the name of a resource to the quantity\n of that resource available.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n redis_max_clients: If provided, attempt to configure Redis with this\n maxclients number.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n include_webui: Boolean flag indicating whether to start the web\n UI, which is a Jupyter notebook.\n\n Returns:\n Address information about the started processes.\n\n Raises:\n Exception: An exception is raised if an inappropriate combination of\n arguments is passed in.\n \"\"\"\n # Convert hostnames to numerical IP address.\n if node_ip_address is not None:\n node_ip_address = services.address_to_ip(node_ip_address)\n if redis_address is not None:\n redis_address = services.address_to_ip(redis_address)\n\n info = {\"node_ip_address\": node_ip_address,\n \"redis_address\": redis_address}\n return _init(address_info=info, start_ray_local=(redis_address is None),\n num_workers=num_workers, driver_mode=driver_mode,\n redirect_output=redirect_output, num_cpus=num_cpus,\n num_gpus=num_gpus, resources=resources,\n num_redis_shards=num_redis_shards,\n redis_max_clients=redis_max_clients,\n plasma_directory=plasma_directory,\n huge_pages=huge_pages,\n include_webui=include_webui)\n\n\ndef cleanup(worker=global_worker):\n \"\"\"Disconnect the worker, and terminate any processes started in init.\n\n This will automatically run at the end when a Python process that uses Ray\n exits. It is ok to run this twice in a row. 
Note that we manually call\n services.cleanup() in the tests because we need to start and stop many\n clusters in the tests, but the import and exit only happen once.\n \"\"\"\n disconnect(worker)\n if hasattr(worker, \"local_scheduler_client\"):\n del worker.local_scheduler_client\n if hasattr(worker, \"plasma_client\"):\n worker.plasma_client.disconnect()\n\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n # If this is a driver, push the finish time to Redis and clean up any\n # other services that were started with the driver.\n worker.redis_client.hmset(b\"Drivers:\" + worker.worker_id,\n {\"end_time\": time.time()})\n services.cleanup()\n else:\n # If this is not a driver, make sure there are no orphan processes,\n # besides possibly the worker itself.\n for process_type, processes in services.all_processes.items():\n if process_type == services.PROCESS_TYPE_WORKER:\n assert len(processes) <= 1\n else:\n assert len(processes) == 0\n\n worker.set_mode(None)\n\n\natexit.register(cleanup)\n\n# Define a custom excepthook so that if the driver exits with an exception, we\n# can push that exception to Redis.\nnormal_excepthook = sys.excepthook\n\n\ndef custom_excepthook(type, value, tb):\n # If this is a driver, push the exception to redis.\n if global_worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n error_message = \"\".join(traceback.format_tb(tb))\n global_worker.redis_client.hmset(b\"Drivers:\" + global_worker.worker_id,\n {\"exception\": error_message})\n # Call the normal excepthook.\n normal_excepthook(type, value, tb)\n\n\nsys.excepthook = custom_excepthook\n\n\ndef print_error_messages(worker):\n \"\"\"Print error messages in the background on the driver.\n\n This runs in a separate thread on the driver and prints error messages in\n the background.\n \"\"\"\n # TODO(rkn): All error messages should have a \"component\" field indicating\n # which process the error came from (e.g., a worker or a plasma store).\n # Currently all error messages come from workers.\n\n helpful_message = \"\"\"\n You can inspect errors by running\n\n ray.error_info()\n\n If this driver is hanging, start a new one with\n\n ray.init(redis_address=\"{}\")\n \"\"\".format(worker.redis_address)\n\n worker.error_message_pubsub_client = worker.redis_client.pubsub()\n # Exports that are published after the call to\n # error_message_pubsub_client.subscribe and before the call to\n # error_message_pubsub_client.listen will still be processed in the loop.\n worker.error_message_pubsub_client.subscribe(\"__keyspace@0__:ErrorKeys\")\n num_errors_received = 0\n\n # Get the exports that occurred before the call to subscribe.\n with worker.lock:\n error_keys = worker.redis_client.lrange(\"ErrorKeys\", 0, -1)\n for error_key in error_keys:\n if error_applies_to_driver(error_key, worker=worker):\n error_message = worker.redis_client.hget(\n error_key, \"message\").decode(\"ascii\")\n print(error_message)\n print(helpful_message)\n num_errors_received += 1\n\n try:\n for msg in worker.error_message_pubsub_client.listen():\n with worker.lock:\n for error_key in worker.redis_client.lrange(\n \"ErrorKeys\", num_errors_received, -1):\n if error_applies_to_driver(error_key, worker=worker):\n error_message = worker.redis_client.hget(\n error_key, \"message\").decode(\"ascii\")\n print(error_message)\n print(helpful_message)\n num_errors_received += 1\n except redis.ConnectionError:\n # When Redis terminates the listen call will throw a ConnectionError,\n # which we catch here.\n pass\n\n\n
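# Counterpart to export_remote_function defined later in this file: a\n# driver hmsets the pickled function under a\n# \"RemoteFunction:<driver_id>:<function_id>\" key and rpushes that key onto\n# \"Exports\"; the import thread then hands each such key to this function\n# on every worker.\ndef fetch_and_register_remote_function(key, 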
worker=global_worker):\n \"\"\"Import a remote function.\"\"\"\n (driver_id, function_id_str, function_name,\n serialized_function, num_return_vals, module, resources,\n max_calls) = worker.redis_client.hmget(\n key, [\"driver_id\",\n \"function_id\",\n \"name\",\n \"function\",\n \"num_return_vals\",\n \"module\",\n \"resources\",\n \"max_calls\"])\n function_id = ray.local_scheduler.ObjectID(function_id_str)\n function_name = function_name.decode(\"ascii\")\n function_properties = FunctionProperties(\n num_return_vals=int(num_return_vals),\n resources=json.loads(resources.decode(\"ascii\")),\n max_calls=int(max_calls))\n module = module.decode(\"ascii\")\n\n # This is a placeholder in case the function can't be unpickled. This will\n # be overwritten if the function is successfully registered.\n def f():\n raise Exception(\"This function was not imported properly.\")\n remote_f_placeholder = remote(function_id=function_id)(lambda *xs: f())\n worker.functions[driver_id][function_id.id()] = (function_name,\n remote_f_placeholder)\n worker.function_properties[driver_id][function_id.id()] = (\n function_properties)\n worker.num_task_executions[driver_id][function_id.id()] = 0\n\n try:\n function = pickle.loads(serialized_function)\n except Exception:\n # If an exception was thrown when the remote function was imported, we\n # record the traceback and notify the scheduler of the failure.\n traceback_str = format_error_message(traceback.format_exc())\n # Log the error message.\n ray.utils.push_error_to_driver(worker.redis_client,\n \"register_remote_function\",\n traceback_str,\n driver_id=driver_id,\n data={\"function_id\": function_id.id(),\n \"function_name\": function_name})\n else:\n # TODO(rkn): Why is the below line necessary?\n function.__module__ = module\n worker.functions[driver_id][function_id.id()] = (\n function_name, remote(function_id=function_id)(function))\n # Add the function to the function table.\n worker.redis_client.rpush(b\"FunctionTable:\" + function_id.id(),\n worker.worker_id)\n\n\ndef fetch_and_execute_function_to_run(key, worker=global_worker):\n \"\"\"Run on arbitrary function on the worker.\"\"\"\n driver_id, serialized_function = worker.redis_client.hmget(\n key, [\"driver_id\", \"function\"])\n try:\n # Deserialize the function.\n function = pickle.loads(serialized_function)\n # Run the function.\n function({\"worker\": worker})\n except Exception:\n # If an exception was thrown when the function was run, we record the\n # traceback and notify the scheduler of the failure.\n traceback_str = traceback.format_exc()\n # Log the error message.\n name = function.__name__ if (\"function\" in locals() and\n hasattr(function, \"__name__\")) else \"\"\n ray.utils.push_error_to_driver(worker.redis_client,\n \"function_to_run\",\n traceback_str,\n driver_id=driver_id,\n data={\"name\": name})\n\n\ndef import_thread(worker, mode):\n worker.import_pubsub_client = worker.redis_client.pubsub()\n # Exports that are published after the call to\n # import_pubsub_client.subscribe and before the call to\n # import_pubsub_client.listen will still be processed in the loop.\n worker.import_pubsub_client.subscribe(\"__keyspace@0__:Exports\")\n # Keep track of the number of imports that we've imported.\n num_imported = 0\n\n # Get the exports that occurred before the call to subscribe.\n with worker.lock:\n export_keys = worker.redis_client.lrange(\"Exports\", 0, -1)\n for key in export_keys:\n num_imported += 1\n\n # Handle the driver case first.\n if mode != WORKER_MODE:\n if 
key.startswith(b\"FunctionsToRun\"):\n fetch_and_execute_function_to_run(key, worker=worker)\n # Continue because FunctionsToRun are the only things that the\n # driver should import.\n continue\n\n if key.startswith(b\"RemoteFunction\"):\n fetch_and_register_remote_function(key, worker=worker)\n elif key.startswith(b\"FunctionsToRun\"):\n fetch_and_execute_function_to_run(key, worker=worker)\n elif key.startswith(b\"ActorClass\"):\n # If this worker is an actor that is supposed to construct this\n # class, fetch the actor and class information and construct\n # the class.\n class_id = key.split(b\":\", 1)[1]\n if (worker.actor_id != NIL_ACTOR_ID and\n worker.class_id == class_id):\n worker.fetch_and_register_actor(key, worker)\n else:\n raise Exception(\"This code should be unreachable.\")\n\n try:\n for msg in worker.import_pubsub_client.listen():\n with worker.lock:\n if msg[\"type\"] == \"subscribe\":\n continue\n assert msg[\"data\"] == b\"rpush\"\n num_imports = worker.redis_client.llen(\"Exports\")\n assert num_imports >= num_imported\n for i in range(num_imported, num_imports):\n num_imported += 1\n key = worker.redis_client.lindex(\"Exports\", i)\n\n # Handle the driver case first.\n if mode != WORKER_MODE:\n if key.startswith(b\"FunctionsToRun\"):\n with log_span(\"ray:import_function_to_run\",\n worker=worker):\n fetch_and_execute_function_to_run(\n key, worker=worker)\n # Continue because FunctionsToRun are the only things\n # that the driver should import.\n continue\n\n if key.startswith(b\"RemoteFunction\"):\n with log_span(\"ray:import_remote_function\",\n worker=worker):\n fetch_and_register_remote_function(key,\n worker=worker)\n elif key.startswith(b\"FunctionsToRun\"):\n with log_span(\"ray:import_function_to_run\",\n worker=worker):\n fetch_and_execute_function_to_run(key,\n worker=worker)\n elif key.startswith(b\"Actor\"):\n # Only get the actor if the actor ID matches the actor\n # ID of this worker.\n actor_id, = worker.redis_client.hmget(key, \"actor_id\")\n if worker.actor_id == actor_id:\n worker.fetch_and_register[\"Actor\"](key, worker)\n else:\n raise Exception(\"This code should be unreachable.\")\n except redis.ConnectionError:\n # When Redis terminates the listen call will throw a ConnectionError,\n # which we catch here.\n pass\n\n\ndef connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker,\n actor_id=NIL_ACTOR_ID):\n \"\"\"Connect this worker to the local scheduler, to Plasma, and to Redis.\n\n Args:\n info (dict): A dictionary with address of the Redis server and the\n sockets of the plasma store, plasma manager, and local scheduler.\n object_id_seed: A seed to use to make the generation of object IDs\n deterministic.\n mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,\n PYTHON_MODE, and SILENT_MODE.\n actor_id: The ID of the actor running on this worker. If this worker is\n not an actor, then this is NIL_ACTOR_ID.\n \"\"\"\n check_main_thread()\n # Do some basic checking to make sure we didn't call ray.init twice.\n error_message = \"Perhaps you called ray.init twice by accident?\"\n assert not worker.connected, error_message\n assert worker.cached_functions_to_run is not None, error_message\n assert worker.cached_remote_functions_and_actors is not None, error_message\n # Initialize some fields.\n worker.worker_id = random_string()\n worker.actor_id = actor_id\n worker.connected = True\n worker.set_mode(mode)\n # The worker.events field is used to aggregate logging information and\n # display it in the web UI. 
Note that Python lists are protected by the GIL,\n # which is important because we will append to this field from multiple\n # threads.\n worker.events = []\n # If running Ray in PYTHON_MODE, there is no need to call\n # create_worker or to start the worker service.\n if mode == PYTHON_MODE:\n return\n # Set the node IP address.\n worker.node_ip_address = info[\"node_ip_address\"]\n worker.redis_address = info[\"redis_address\"]\n\n # Create a Redis client.\n redis_ip_address, redis_port = info[\"redis_address\"].split(\":\")\n worker.redis_client = redis.StrictRedis(host=redis_ip_address,\n port=int(redis_port))\n\n # For drivers, check that the version information matches the version\n # information that the Ray cluster was started with.\n try:\n ray.services.check_version_info(worker.redis_client)\n except Exception as e:\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n raise e\n elif mode == WORKER_MODE:\n traceback_str = traceback.format_exc()\n ray.utils.push_error_to_driver(worker.redis_client,\n \"version_mismatch\",\n traceback_str,\n driver_id=None)\n\n worker.lock = threading.Lock()\n\n # Check the RedirectOutput key in Redis and based on its value redirect\n # worker output and error to their own files.\n if mode == WORKER_MODE:\n # This key is set in services.py when Redis is started.\n redirect_worker_output_val = worker.redis_client.get(\"RedirectOutput\")\n if (redirect_worker_output_val is not None and\n int(redirect_worker_output_val) == 1):\n redirect_worker_output = 1\n else:\n redirect_worker_output = 0\n if redirect_worker_output:\n log_stdout_file, log_stderr_file = services.new_log_files(\"worker\",\n True)\n sys.stdout = log_stdout_file\n sys.stderr = log_stderr_file\n services.record_log_files_in_redis(info[\"redis_address\"],\n info[\"node_ip_address\"],\n [log_stdout_file,\n log_stderr_file])\n\n # Create an object for interfacing with the global state.\n global_state._initialize_global_state(redis_ip_address, int(redis_port))\n\n # Register the worker with Redis.\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # The concept of a driver is the same as the concept of a \"job\".\n # Register the driver/job with Redis here.\n import __main__ as main\n driver_info = {\n \"node_ip_address\": worker.node_ip_address,\n \"driver_id\": worker.worker_id,\n \"start_time\": time.time(),\n \"plasma_store_socket\": info[\"store_socket_name\"],\n \"plasma_manager_socket\": info[\"manager_socket_name\"],\n \"local_scheduler_socket\": info[\"local_scheduler_socket_name\"]}\n driver_info[\"name\"] = (main.__file__ if hasattr(main, \"__file__\")\n else \"INTERACTIVE MODE\")\n worker.redis_client.hmset(b\"Drivers:\" + worker.worker_id, driver_info)\n if not worker.redis_client.exists(\"webui\"):\n worker.redis_client.hmset(\"webui\", {\"url\": info[\"webui_url\"]})\n is_worker = False\n elif mode == WORKER_MODE:\n # Register the worker with Redis.\n worker_dict = {\n \"node_ip_address\": worker.node_ip_address,\n \"plasma_store_socket\": info[\"store_socket_name\"],\n \"plasma_manager_socket\": info[\"manager_socket_name\"],\n \"local_scheduler_socket\": info[\"local_scheduler_socket_name\"]}\n if redirect_worker_output:\n worker_dict[\"stdout_file\"] = os.path.abspath(log_stdout_file.name)\n worker_dict[\"stderr_file\"] = os.path.abspath(log_stderr_file.name)\n worker.redis_client.hmset(b\"Workers:\" + worker.worker_id, worker_dict)\n is_worker = True\n else:\n raise Exception(\"This code should be unreachable.\")\n\n
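 # The third positional argument to plasma.connect below (64) is read\n # here as the client's release delay in this era of pyarrow's plasma\n # API; that reading is an assumption, not something stated in this file.\n # Create an object store client.\n worker.plasma_client = 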
plasma.connect(info[\"store_socket_name\"],\n info[\"manager_socket_name\"],\n 64)\n # Create the local scheduler client.\n if worker.actor_id != NIL_ACTOR_ID:\n num_gpus = int(worker.redis_client.hget(b\"Actor:\" + actor_id,\n \"num_gpus\"))\n else:\n num_gpus = 0\n worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(\n info[\"local_scheduler_socket_name\"], worker.worker_id, worker.actor_id,\n is_worker, num_gpus)\n\n # If this is a driver, set the current task ID, the task driver ID, and set\n # the task index to 0.\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # If the user provided an object_id_seed, then set the current task ID\n # deterministically based on that seed (without altering the state of\n # the user's random number generator). Otherwise, set the current task\n # ID randomly to avoid object ID collisions.\n numpy_state = np.random.get_state()\n if object_id_seed is not None:\n np.random.seed(object_id_seed)\n else:\n # Try to use true randomness.\n np.random.seed(None)\n worker.current_task_id = ray.local_scheduler.ObjectID(\n np.random.bytes(20))\n # When tasks are executed on remote workers in the context of multiple\n # drivers, the task driver ID is used to keep track of which driver is\n # responsible for the task so that error messages will be propagated to\n # the correct driver.\n worker.task_driver_id = ray.local_scheduler.ObjectID(worker.worker_id)\n # Reset the state of the numpy random number generator.\n np.random.set_state(numpy_state)\n # Set other fields needed for computing task IDs.\n worker.task_index = 0\n worker.put_index = 0\n\n # Create an entry for the driver task in the task table. This task is\n # added immediately with status RUNNING. This allows us to push errors\n # related to this driver task back to the driver. For example, if the\n # driver creates an object that is later evicted, we should notify the\n # user that we're unable to reconstruct the object, since we cannot\n # rerun the driver.\n nil_actor_counter = 0\n driver_task = ray.local_scheduler.Task(\n worker.task_driver_id,\n ray.local_scheduler.ObjectID(NIL_FUNCTION_ID),\n [],\n 0,\n worker.current_task_id,\n worker.task_index,\n ray.local_scheduler.ObjectID(NIL_ACTOR_ID),\n ray.local_scheduler.ObjectID(NIL_ACTOR_ID),\n nil_actor_counter,\n False,\n [],\n {\"CPU\": 0})\n global_state._execute_command(\n driver_task.task_id(),\n \"RAY.TASK_TABLE_ADD\",\n driver_task.task_id().id(),\n TASK_STATUS_RUNNING,\n NIL_LOCAL_SCHEDULER_ID,\n driver_task.execution_dependencies_string(),\n ray.local_scheduler.task_to_string(driver_task))\n # Set the driver's current task ID to the task ID assigned to the\n # driver task.\n worker.current_task_id = driver_task.task_id()\n\n # If this is an actor, get the ID of the corresponding class for the actor.\n if worker.actor_id != NIL_ACTOR_ID:\n actor_key = b\"Actor:\" + worker.actor_id\n class_id = worker.redis_client.hget(actor_key, \"class_id\")\n worker.class_id = class_id\n # Store a list of the dummy outputs produced by actor tasks, to pin the\n # dummy outputs in the object store.\n worker.actor_pinned_objects = []\n\n # Initialize the serialization library. 
This registers some classes, and so\n # it must be run before we export all of the cached remote functions.\n _initialize_serialization()\n\n # Start a thread to import exports from the driver or from other workers.\n # Note that the driver also has an import thread, which is used only to\n # import custom class definitions from calls to register_custom_serializer\n # that happen under the hood on workers.\n t = threading.Thread(target=import_thread, args=(worker, mode))\n # Making the thread a daemon causes it to exit when the main thread exits.\n t.daemon = True\n t.start()\n\n # If this is a driver running in SCRIPT_MODE, start a thread to print error\n # messages asynchronously in the background. Ideally the scheduler would\n # push messages to the driver's worker service, but we ran into bugs when\n # trying to properly shutdown the driver's worker service, so we are\n # temporarily using this implementation which constantly queries the\n # scheduler for new error messages.\n if mode == SCRIPT_MODE:\n t = threading.Thread(target=print_error_messages, args=(worker,))\n # Making the thread a daemon causes it to exit when the main thread\n # exits.\n t.daemon = True\n t.start()\n\n if mode in [SCRIPT_MODE, SILENT_MODE]:\n # Add the directory containing the script that is running to the Python\n # paths of the workers. Also add the current directory. Note that this\n # assumes that the directory structures on the machines in the clusters\n # are the same.\n script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))\n current_directory = os.path.abspath(os.path.curdir)\n worker.run_function_on_all_workers(\n lambda worker_info: sys.path.insert(1, script_directory))\n worker.run_function_on_all_workers(\n lambda worker_info: sys.path.insert(1, current_directory))\n # TODO(rkn): Here we first export functions to run, then remote\n # functions. The order matters. For example, one of the functions to\n # run may set the Python path, which is needed to import a module used\n # to define a remote function. We may want to change the order to\n # simply be the order in which the exports were defined on the driver.\n # In addition, we will need to retain the ability to decide what the\n # first few exports are (mostly to set the Python path). Additionally,\n # note that the first exports to be defined on the driver will be the\n # ones defined in separate modules that are imported by the driver.\n # Export cached functions_to_run.\n for function in worker.cached_functions_to_run:\n worker.run_function_on_all_workers(function)\n # Export cached remote functions to the workers.\n for cached_type, info in worker.cached_remote_functions_and_actors:\n if cached_type == \"remote_function\":\n (function_id, func_name, func,\n func_invoker, function_properties) = info\n export_remote_function(function_id, func_name, func,\n func_invoker, function_properties,\n worker)\n elif cached_type == \"actor\":\n (key, actor_class_info) = info\n ray.actor.publish_actor_class_to_key(key, actor_class_info,\n worker)\n else:\n assert False, \"This code should be unreachable.\"\n worker.cached_functions_to_run = None\n worker.cached_remote_functions_and_actors = None\n\n\ndef disconnect(worker=global_worker):\n \"\"\"Disconnect this worker from the scheduler and object store.\"\"\"\n # Reset the list of cached remote functions and actors so that if more\n # remote functions or actors are defined and then connect is called again,\n # the remote functions will be exported. 
This is mostly relevant for the\n # tests.\n worker.connected = False\n worker.cached_functions_to_run = []\n worker.cached_remote_functions_and_actors = []\n worker.serialization_context = pyarrow.SerializationContext()\n\n\ndef _try_to_compute_deterministic_class_id(cls, depth=5):\n \"\"\"Attempt to produce a deterministic class ID for a given class.\n\n The goal here is for the class ID to be the same when this is run on\n different worker processes. Pickling, loading, and pickling again seems to\n produce more consistent results than simply pickling. This is a bit crazy\n and could cause problems, in which case we should revert it and figure out\n something better.\n\n Args:\n cls: The class to produce an ID for.\n depth: The number of times to repeatedly try to load and dump the\n string while trying to reach a fixed point.\n\n Returns:\n A class ID for this class. We attempt to make the class ID the same\n when this function is run on different workers, but that is not\n guaranteed.\n\n Raises:\n Exception: This could raise an exception if cloudpickle raises an\n exception.\n \"\"\"\n # Pickling, loading, and pickling again seems to produce more consistent\n # results than simply pickling. This is a bit crazy and could cause\n # problems, in which case we should revert it (see the docstring above).\n class_id = pickle.dumps(cls)\n for _ in range(depth):\n new_class_id = pickle.dumps(pickle.loads(class_id))\n if new_class_id == class_id:\n # We appear to have reached a fixed point, so use this as the ID.\n return hashlib.sha1(new_class_id).digest()\n class_id = new_class_id\n\n # We have not reached a fixed point, so we may end up with a different\n # class ID for this custom class on each worker, which could lead to the\n # same class definition being exported many many times.\n print(\"WARNING: Could not produce a deterministic class ID for class \"\n \"{}\".format(cls), file=sys.stderr)\n return hashlib.sha1(new_class_id).digest()\n\n\ndef register_custom_serializer(cls, use_pickle=False, use_dict=False,\n serializer=None, deserializer=None,\n local=False, worker=global_worker):\n \"\"\"Enable serialization and deserialization for a particular class.\n\n This method runs the register_class function defined below on every worker,\n which will enable ray to properly serialize and deserialize objects of\n this class.\n\n Args:\n cls (type): The class that ray should serialize.\n use_pickle (bool): If true, then objects of this class will be\n serialized using pickle.\n use_dict: If true, then objects of this class will be serialized by\n turning their __dict__ fields into a dictionary. Must be False if\n use_pickle is true.\n serializer: The custom serializer to use. This should be provided if\n and only if use_pickle and use_dict are False.\n deserializer: The custom deserializer to use. This should be provided\n if and only if use_pickle and use_dict are False.\n local: True if the serializers should only be registered on the current\n worker. This should usually be False.\n\n Raises:\n Exception: An exception is raised if pickle=False and the class cannot\n be efficiently serialized by Ray. 
This can also raise an exception\n if use_dict is true and cls is not pickleable.\n \"\"\"\n assert (serializer is None) == (deserializer is None), (\n \"The serializer/deserializer arguments must both be provided or \"\n \"both not be provided.\"\n )\n use_custom_serializer = (serializer is not None)\n\n assert use_custom_serializer + use_pickle + use_dict == 1, (\n \"Exactly one of use_pickle, use_dict, or serializer/deserializer must \"\n \"be specified.\"\n )\n\n if use_dict:\n # Raise an exception if cls cannot be serialized efficiently by Ray.\n serialization.check_serializable(cls)\n\n if not local:\n # In this case, the class ID will be used to deduplicate the class\n # across workers. Note that cloudpickle unfortunately does not produce\n # deterministic strings, so these IDs could be different on different\n # workers. We could use something weaker like cls.__name__, however\n # that would run the risk of having collisions. TODO(rkn): We should\n # improve this.\n try:\n # Attempt to produce a class ID that will be the same on each\n # worker. However, determinism is not guaranteed, and the result\n # may be different on different workers.\n class_id = _try_to_compute_deterministic_class_id(cls)\n except Exception as e:\n raise serialization.CloudPickleError(\"Failed to pickle class \"\n \"'{}'\".format(cls))\n else:\n # In this case, the class ID only needs to be meaningful on this worker\n # and not across workers.\n class_id = random_string()\n\n def register_class_for_serialization(worker_info):\n # TODO(rkn): We need to be more thoughtful about what to do if custom\n # serializers have already been registered for class_id. In some cases,\n # we may want to use the last user-defined serializers and ignore\n # subsequent calls to register_custom_serializer that were made by the\n # system.\n worker_info[\"worker\"].serialization_context.register_type(\n cls, class_id, pickle=use_pickle, custom_serializer=serializer,\n custom_deserializer=deserializer)\n\n if not local:\n worker.run_function_on_all_workers(register_class_for_serialization)\n else:\n # Since we are pickling objects of this class, we don't actually need\n # to ship the class definition.\n register_class_for_serialization({\"worker\": worker})\n\n\nclass RayLogSpan(object):\n \"\"\"An object used to enable logging a span of events with a with statement.\n\n Attributes:\n event_type (str): The type of the event being logged.\n contents: Additional information to log.\n \"\"\"\n def __init__(self, event_type, contents=None, worker=global_worker):\n \"\"\"Initialize a RayLogSpan object.\"\"\"\n self.event_type = event_type\n self.contents = contents\n self.worker = worker\n\n def __enter__(self):\n \"\"\"Log the beginning of a span event.\"\"\"\n log(event_type=self.event_type,\n contents=self.contents,\n kind=LOG_SPAN_START,\n worker=self.worker)\n\n def __exit__(self, type, value, tb):\n \"\"\"Log the end of a span event. 
Log any exception that occurred.\"\"\"\n if type is None:\n log(event_type=self.event_type, kind=LOG_SPAN_END,\n worker=self.worker)\n else:\n log(event_type=self.event_type,\n contents={\"type\": str(type),\n \"value\": value,\n \"traceback\": traceback.format_exc()},\n kind=LOG_SPAN_END,\n worker=self.worker)\n\n\ndef log_span(event_type, contents=None, worker=global_worker):\n return RayLogSpan(event_type, contents=contents, worker=worker)\n\n\ndef log_event(event_type, contents=None, worker=global_worker):\n log(event_type, kind=LOG_POINT, contents=contents, worker=worker)\n\n\ndef log(event_type, kind, contents=None, worker=global_worker):\n \"\"\"Log an event to the global state store.\n\n This adds the event to a buffer of events locally. The buffer can be\n flushed and written to the global state store by calling flush_log().\n\n Args:\n event_type (str): The type of the event.\n contents: More general data to store with the event.\n kind (int): Either LOG_POINT, LOG_SPAN_START, or LOG_SPAN_END. This is\n LOG_POINT if the event being logged happens at a single point in\n time. It is LOG_SPAN_START if we are starting to log a span of\n time, and it is LOG_SPAN_END if we are finishing logging a span of\n time.\n \"\"\"\n # TODO(rkn): This code currently takes around half a microsecond. Since we\n # call it tens of times per task, this adds up. We will need to redo the\n # logging code, perhaps in C.\n contents = {} if contents is None else contents\n assert isinstance(contents, dict)\n # Make sure all of the keys and values in the dictionary are strings.\n contents = {str(k): str(v) for k, v in contents.items()}\n # Log the event if this is a worker and not a driver, since the driver's\n # event log never gets flushed.\n if worker.mode == WORKER_MODE:\n worker.events.append((time.time(), event_type, kind, contents))\n\n\ndef flush_log(worker=global_worker):\n \"\"\"Send the logged worker events to the global state store.\"\"\"\n event_log_key = b\"event_log:\" + worker.worker_id\n event_log_value = json.dumps(worker.events)\n worker.local_scheduler_client.log_event(event_log_key,\n event_log_value,\n time.time())\n worker.events = []\n\n\ndef get(object_ids, worker=global_worker):\n \"\"\"Get a remote object or a list of remote objects from the object store.\n\n This method blocks until the object corresponding to the object ID is\n available in the local object store. If this object is not in the local\n object store, it will be shipped from an object store that has it (once the\n object has been created). 
If object_ids is a list, then the objects\n corresponding to each object in the list will be returned.\n\n Args:\n object_ids: Object ID of the object to get or a list of object IDs to\n get.\n\n Returns:\n A Python object or a list of Python objects.\n \"\"\"\n check_connected(worker)\n with log_span(\"ray:get\", worker=worker):\n check_main_thread()\n\n if worker.mode == PYTHON_MODE:\n # In PYTHON_MODE, ray.get is the identity operation (the input will\n # actually be a value not an objectid).\n return object_ids\n if isinstance(object_ids, list):\n values = worker.get_object(object_ids)\n for i, value in enumerate(values):\n if isinstance(value, RayTaskError):\n raise RayGetError(object_ids[i], value)\n return values\n else:\n value = worker.get_object([object_ids])[0]\n if isinstance(value, RayTaskError):\n # If the result is a RayTaskError, then the task that created\n # this object failed, and we should propagate the error message\n # here.\n raise RayGetError(object_ids, value)\n return value\n\n\ndef put(value, worker=global_worker):\n \"\"\"Store an object in the object store.\n\n Args:\n value: The Python object to be stored.\n\n Returns:\n The object ID assigned to this value.\n \"\"\"\n check_connected(worker)\n with log_span(\"ray:put\", worker=worker):\n check_main_thread()\n\n if worker.mode == PYTHON_MODE:\n # In PYTHON_MODE, ray.put is the identity operation.\n return value\n object_id = worker.local_scheduler_client.compute_put_id(\n worker.current_task_id, worker.put_index)\n worker.put_object(object_id, value)\n worker.put_index += 1\n return object_id\n\n\ndef wait(object_ids, num_returns=1, timeout=None, worker=global_worker):\n \"\"\"Return a list of IDs that are ready and a list of IDs that are not.\n\n If timeout is set, the function returns either when the requested number of\n IDs are ready or when the timeout is reached, whichever occurs first. If it\n is not set, the function simply waits until that number of objects is ready\n and returns that exact number of objectids.\n\n This method returns two lists. The first list consists of object IDs that\n correspond to objects that are stored in the object store. The second list\n corresponds to the rest of the object IDs (which may or may not be ready).\n\n Args:\n object_ids (List[ObjectID]): List of object IDs for objects that may or\n may not be ready. 
Note that these IDs must be unique.\n num_returns (int): The number of object IDs that should be returned.\n timeout (int): The maximum amount of time in milliseconds to wait\n before returning.\n\n Returns:\n A list of object IDs that are ready and a list of the remaining object\n IDs.\n \"\"\"\n\n if isinstance(object_ids, ray.local_scheduler.ObjectID):\n raise TypeError(\n \"wait() expected a list of ObjectID, got a single ObjectID\")\n\n if not isinstance(object_ids, list):\n raise TypeError(\"wait() expected a list of ObjectID, got {}\".format(\n type(object_ids)))\n\n if worker.mode != PYTHON_MODE:\n for object_id in object_ids:\n if not isinstance(object_id, ray.local_scheduler.ObjectID):\n raise TypeError(\n \"wait() expected a list of ObjectID, \"\n \"got list containing {}\".format(type(object_id)))\n\n check_connected(worker)\n with log_span(\"ray:wait\", worker=worker):\n check_main_thread()\n\n # When Ray is run in PYTHON_MODE, all functions are run immediately,\n # so all objects in object_id are ready.\n if worker.mode == PYTHON_MODE:\n return object_ids[:num_returns], object_ids[num_returns:]\n\n # TODO(rkn): This is a temporary workaround for\n # https://github.com/ray-project/ray/issues/997. However, it should be\n # fixed in Arrow instead of here.\n if len(object_ids) == 0:\n return [], []\n\n object_id_strs = [plasma.ObjectID(object_id.id())\n for object_id in object_ids]\n timeout = timeout if timeout is not None else 2 ** 30\n ready_ids, remaining_ids = worker.plasma_client.wait(object_id_strs,\n timeout,\n num_returns)\n ready_ids = [ray.local_scheduler.ObjectID(object_id.binary())\n for object_id in ready_ids]\n remaining_ids = [ray.local_scheduler.ObjectID(object_id.binary())\n for object_id in remaining_ids]\n return ready_ids, remaining_ids\n\n\ndef format_error_message(exception_message, task_exception=False):\n \"\"\"Improve the formatting of an exception thrown by a remote function.\n\n This method takes a traceback from an exception and makes it nicer by\n removing a few uninformative lines and adding some space to indent the\n remaining lines nicely.\n\n Args:\n exception_message (str): A message generated by traceback.format_exc().\n\n Returns:\n A string of the formatted exception message.\n \"\"\"\n lines = exception_message.split(\"\\n\")\n if task_exception:\n # For errors that occur inside of tasks, remove lines 1, 2, 3, and 4,\n # which are always the same, they just contain information about the\n # main loop.\n lines = lines[0:1] + lines[5:]\n return \"\\n\".join(lines)\n\n\ndef _submit_task(function_id, args, worker=global_worker):\n \"\"\"This is a wrapper around worker.submit_task.\n\n We use this wrapper so that in the remote decorator, we can call\n _submit_task instead of worker.submit_task. The difference is that when we\n attempt to serialize remote functions, we don't attempt to serialize the\n worker object, which cannot be serialized.\n \"\"\"\n return worker.submit_task(function_id, args)\n\n\ndef _mode(worker=global_worker):\n \"\"\"This is a wrapper around worker.mode.\n\n We use this wrapper so that in the remote decorator, we can call _mode()\n instead of worker.mode. 
The difference is that when we attempt to serialize\n remote functions, we don't attempt to serialize the worker object, which\n cannot be serialized.\n \"\"\"\n return worker.mode\n\n\ndef export_remote_function(function_id, func_name, func, func_invoker,\n function_properties, worker=global_worker):\n check_main_thread()\n if _mode(worker) not in [SCRIPT_MODE, SILENT_MODE]:\n raise Exception(\"export_remote_function can only be called on a \"\n \"driver.\")\n\n worker.function_properties[\n worker.task_driver_id.id()][function_id.id()] = function_properties\n task_driver_id = worker.task_driver_id\n key = b\"RemoteFunction:\" + task_driver_id.id() + b\":\" + function_id.id()\n\n # Work around limitations of Python pickling.\n func_name_global_valid = func.__name__ in func.__globals__\n func_name_global_value = func.__globals__.get(func.__name__)\n # Allow the function to reference itself as a global variable\n if not is_cython(func):\n func.__globals__[func.__name__] = func_invoker\n try:\n pickled_func = pickle.dumps(func)\n finally:\n # Undo our changes\n if func_name_global_valid:\n func.__globals__[func.__name__] = func_name_global_value\n else:\n del func.__globals__[func.__name__]\n\n worker.redis_client.hmset(key, {\n \"driver_id\": worker.task_driver_id.id(),\n \"function_id\": function_id.id(),\n \"name\": func_name,\n \"module\": func.__module__,\n \"function\": pickled_func,\n \"num_return_vals\": function_properties.num_return_vals,\n \"resources\": json.dumps(function_properties.resources),\n \"max_calls\": function_properties.max_calls})\n worker.redis_client.rpush(\"Exports\", key)\n\n\ndef in_ipython():\n \"\"\"Return true if we are in an IPython interpreter and false otherwise.\"\"\"\n try:\n __IPYTHON__\n return True\n except NameError:\n return False\n\n\ndef compute_function_id(func_name, func):\n \"\"\"Compute an function ID for a function.\n\n Args:\n func_name: The name of the function (this includes the module name plus\n the function name).\n func: The actual function.\n\n Returns:\n This returns the function ID.\n \"\"\"\n function_id_hash = hashlib.sha1()\n # Include the function name in the hash.\n function_id_hash.update(func_name.encode(\"ascii\"))\n # If we are running a script or are in IPython, include the source code in\n # the hash. If we are in a regular Python interpreter we skip this part\n # because the source code is not accessible. 
If the function is a built-in\n # (e.g., Cython), the source code is not accessible.\n import __main__ as main\n if (hasattr(main, \"__file__\") or in_ipython()) \\\n and inspect.isfunction(func):\n function_id_hash.update(inspect.getsource(func).encode(\"ascii\"))\n # Compute the function ID.\n function_id = function_id_hash.digest()\n assert len(function_id) == 20\n function_id = FunctionID(function_id)\n\n return function_id\n\n\ndef remote(*args, **kwargs):\n \"\"\"This decorator is used to define remote functions and to define actors.\n\n Args:\n num_return_vals (int): The number of object IDs that a call to this\n function should return.\n num_cpus (int): The number of CPUs needed to execute this function.\n num_gpus (int): The number of GPUs needed to execute this function.\n resources: A dictionary mapping resource name to the required quantity\n of that resource.\n max_calls (int): The maximum number of tasks of this kind that can be\n run on a worker before the worker needs to be restarted.\n checkpoint_interval (int): The number of tasks to run between\n checkpoints of the actor state.\n \"\"\"\n worker = global_worker\n\n def make_remote_decorator(num_return_vals, resources, max_calls,\n checkpoint_interval, func_id=None):\n def remote_decorator(func_or_class):\n if inspect.isfunction(func_or_class) or is_cython(func_or_class):\n function_properties = FunctionProperties(\n num_return_vals=num_return_vals,\n resources=resources,\n max_calls=max_calls)\n return remote_function_decorator(func_or_class,\n function_properties)\n if inspect.isclass(func_or_class):\n return worker.make_actor(func_or_class, resources,\n checkpoint_interval)\n raise Exception(\"The @ray.remote decorator must be applied to \"\n \"either a function or to a class.\")\n\n def remote_function_decorator(func, function_properties):\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n if func_id is None:\n function_id = compute_function_id(func_name, func)\n else:\n function_id = func_id\n\n def func_call(*args, **kwargs):\n \"\"\"This runs immediately when a remote function is called.\"\"\"\n check_connected()\n check_main_thread()\n args = signature.extend_args(function_signature, args, kwargs)\n\n if _mode() == PYTHON_MODE:\n # In PYTHON_MODE, remote calls simply execute the function.\n # We copy the arguments to prevent the function call from\n # mutating them and to match the usual behavior of\n # immutable remote objects.\n result = func(*copy.deepcopy(args))\n return result\n objectids = _submit_task(function_id, args)\n if len(objectids) == 1:\n return objectids[0]\n elif len(objectids) > 1:\n return objectids\n\n def func_executor(arguments):\n \"\"\"This gets run when the remote function is executed.\"\"\"\n result = func(*arguments)\n return result\n\n def func_invoker(*args, **kwargs):\n \"\"\"This is used to invoke the function.\"\"\"\n raise Exception(\"Remote functions cannot be called directly. 
\"\n \"Instead of running '{}()', try '{}.remote()'.\"\n .format(func_name, func_name))\n func_invoker.remote = func_call\n func_invoker.executor = func_executor\n func_invoker.is_remote = True\n func_name = \"{}.{}\".format(func.__module__, func.__name__)\n func_invoker.func_name = func_name\n if sys.version_info >= (3, 0) or is_cython(func):\n func_invoker.__doc__ = func.__doc__\n else:\n func_invoker.func_doc = func.func_doc\n\n signature.check_signature_supported(func)\n function_signature = signature.extract_signature(func)\n\n # Everything ready - export the function\n if worker.mode in [SCRIPT_MODE, SILENT_MODE]:\n export_remote_function(function_id, func_name, func,\n func_invoker, function_properties)\n elif worker.mode is None:\n worker.cached_remote_functions_and_actors.append(\n (\"remote_function\", (function_id, func_name, func,\n func_invoker, function_properties)))\n return func_invoker\n\n return remote_decorator\n\n # Handle resource arguments\n num_cpus = kwargs[\"num_cpus\"] if \"num_cpus\" in kwargs else 1\n num_gpus = kwargs[\"num_gpus\"] if \"num_gpus\" in kwargs else 0\n resources = kwargs.get(\"resources\", {})\n if not isinstance(resources, dict):\n raise Exception(\"The 'resources' keyword argument must be a \"\n \"dictionary, but received type {}.\"\n .format(type(resources)))\n assert \"CPU\" not in resources, \"Use the 'num_cpus' argument.\"\n assert \"GPU\" not in resources, \"Use the 'num_gpus' argument.\"\n resources[\"CPU\"] = num_cpus\n resources[\"GPU\"] = num_gpus\n # Handle other arguments.\n num_return_vals = (kwargs[\"num_return_vals\"] if \"num_return_vals\"\n in kwargs else 1)\n max_calls = kwargs[\"max_calls\"] if \"max_calls\" in kwargs else 0\n checkpoint_interval = (kwargs[\"checkpoint_interval\"]\n if \"checkpoint_interval\" in kwargs else -1)\n\n if _mode() == WORKER_MODE:\n if \"function_id\" in kwargs:\n function_id = kwargs[\"function_id\"]\n return make_remote_decorator(num_return_vals, resources, max_calls,\n checkpoint_interval, function_id)\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # This is the case where the decorator is just @ray.remote.\n return make_remote_decorator(\n num_return_vals, resources,\n max_calls, checkpoint_interval)(args[0])\n else:\n # This is the case where the decorator is something like\n # @ray.remote(num_return_vals=2).\n error_string = (\"The @ray.remote decorator must be applied either \"\n \"with no arguments and no parentheses, for example \"\n \"'@ray.remote', or it must be applied using some of \"\n \"the arguments 'num_return_vals', 'resources', \"\n \"or 'max_calls', like \"\n \"'@ray.remote(num_return_vals=2, \"\n \"resources={\\\"GPU\\\": 1})'.\")\n assert len(args) == 0 and len(kwargs) > 0, error_string\n for key in kwargs:\n assert key in [\"num_return_vals\", \"num_cpus\", \"num_gpus\",\n \"resources\", \"max_calls\",\n \"checkpoint_interval\"], error_string\n assert \"function_id\" not in kwargs\n return make_remote_decorator(num_return_vals, resources, max_calls,\n checkpoint_interval)\n"
] | [
[
"numpy.random.bytes",
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
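For reference, the `apis` field of this row lists four `numpy.random` state-management calls. A minimal, illustrative sketch of the snapshot/rewind pattern they support (not drawn from the repository itself):

```python
import numpy as np

np.random.seed(123)                 # fix the legacy global generator
state = np.random.get_state()       # snapshot the full generator state
first = np.random.bytes(8)          # draw 8 random bytes
np.random.set_state(state)          # rewind to the snapshot
assert np.random.bytes(8) == first  # the same bytes are reproduced
```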
PranavEranki/BayesianSuperResolution | [
"ca983f814061b04c35b71d921acdfed1e30cddb6"
] | [
"main.py"
] | [
"from __future__ import print_function\nimport argparse\nfrom math import log10\nimport os\nimport math\nimport torch\n#import torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom model import BBBNet\nfrom utils.BBBlayers import GaussianVariationalInference\nfrom data import get_training_set, get_test_set\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Super Res Example')\nparser.add_argument('--upscale_factor', type=int, default=3, help=\"super resolution upscale factor\")\nparser.add_argument('--batch_size', type=int, default=1024, help='training batch size')\nparser.add_argument('--testBatchSize', type=int, default=10, help='testing batch size')\nparser.add_argument('--num_epochs', type=int, default=2000, help='number of epochs to train for')\nparser.add_argument('--num_samples', default=10, type=int, help='Number of samples')\nparser.add_argument('--beta_type', default=\"Blundell\", type=str, help='Beta type')\nparser.add_argument('--lr', type=float, default=0.01, help='Learning Rate. Default=0.01')\nparser.add_argument('--cuda', action='store_true', help='use cuda?', default=True)\nparser.add_argument('--threads', type=int, default=8, help='number of threads for data loader to use')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n\n\nglobal opt\nopt = parser.parse_args()\nprint(opt)\n\nif opt.cuda and not torch.cuda.is_available():\n raise Exception(\"No GPU found, please run without --cuda\")\n\ntorch.manual_seed(opt.seed)\n\ndevice = torch.device(\"cuda\" if opt.cuda else \"cpu\")\n\nprint('===> Loading datasets')\ntrain_set = get_training_set(opt.upscale_factor)\ntest_set = get_test_set(opt.upscale_factor)\ntraining_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)\ntesting_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)\n\nprint('===> Building model')\nmodel = BBBNet(upscale_factor=opt.upscale_factor).to(device)\n\nvi = GaussianVariationalInference(torch.nn.MSELoss())\n\noptimizer = optim.Adam(model.parameters(), lr=opt.lr)\n\n#load a model from checkpoint\n\nif opt.resume:\n if os.path.isfile(opt.resume):\n print(\"=> loading checkpoint '{}'\".format(opt.resume))\n checkpoint = torch.load(opt.resume)\n opt.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(opt.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(opt.resume))\n\n\n\ndef train(epoch):\n epoch_loss = 0\n m = math.ceil(len(train_set) / opt.batch_size)\n for iteration, batch in enumerate(training_data_loader, 1):\n input, target = batch[0].to(device), batch[1].to(device)\n\n\n\n if opt.beta_type is \"Blundell\":\n beta = 2 ** (m - (iteration + 1)) / (2 ** m - 1)\n elif opt.beta_type is \"Soenderby\":\n beta = min(epoch / (opt.num_epochs // 4), 1)\n elif opt.beta_type is \"Standard\":\n beta = 1 / m\n else:\n beta = 0\n\n outputs, kl = model.probforward(input)\n optimizer.zero_grad()\n loss = vi(outputs, target,kl,beta )\n epoch_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n print(\"===> Epoch[{}]({}/{}): Loss: {:.4f}\".format(epoch, iteration, len(training_data_loader), 
loss.item()))\n\n print(\"===> Epoch {} Complete: Avg. Loss: {:.4f}\".format(epoch, epoch_loss / len(training_data_loader)))\n\n\ndef test():\n avg_psnr = 0\n model.eval()\n m = math.ceil(len(train_set) / opt.batch_size)\n with torch.no_grad():\n for batch in testing_data_loader:\n input, target = batch[0].to(device), batch[1].to(device)\n\n if opt.beta_type is \"Blundell\":\n beta = 2 ** (m - (opt.testBatchSize + 1)) / (2 ** m - 1)\n elif opt.beta_type is \"Soenderby\":\n beta = min(epoch / (opt.num_epochs // 4), 1)\n elif opt.beta_type is \"Standard\":\n beta = 1 / m\n else:\n beta = 0\n\n prediction, kl = model.probforward(input)\n mse = vi(prediction, target, kl, beta)\n psnr = 10 * log10(1 / mse.item())\n avg_psnr += psnr\n print(\"===> Avg. PSNR: {:.4f} dB\".format(avg_psnr / len(testing_data_loader)))\n\n\ndef checkpoint(epoch):\n model_out_path = \"model_epoch_{}.pth\".format(epoch)\n torch.save(model, model_out_path)\n print(\"Checkpoint saved to {}\".format(model_out_path))\n\n\ndef save_checkpoint(state):\n model_out_path = \"./checkpoints/\"+\"model_epoch_{}.pth\".format(epoch)\n torch.save(state, model_out_path)\n print(\"Checkpoint saved to {}\".format(model_out_path))\n\n\nfor epoch in range(1, opt.num_epochs + 1):\n train(epoch)\n test()\n #checkpoint(epoch)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': model,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n })\n\n"
] | [
[
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.MSELoss",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
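The `apis` field for this row centers on checkpointing. A minimal, self-contained sketch of the `torch.save`/`torch.load` round-trip that `main.py` performs; the `nn.Linear` stand-in model and the `ckpt.pth` filename are assumptions for illustration, not the repository's own objects:

```python
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)                              # stand-in for BBBNet
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Save a checkpoint dict, as the training loop does after each epoch.
torch.save({'epoch': 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()}, 'ckpt.pth')

# Resume: restore both model and optimizer state.
ckpt = torch.load('ckpt.pth')
model.load_state_dict(ckpt['state_dict'])
optimizer.load_state_dict(ckpt['optimizer'])
```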
srinathos/swa | [
"147a7e8aa780952a2ac680919661b71740b96833"
] | [
"activations.py"
] | [
"import torch\nimport torch.nn.functional as F\n\n\ndef chabanne_2(x):\n # x = x/2\n return 0.1992 + 0.5002 * x + 0.1997 * x ** 2\n\n\ndef chabanne_3(x):\n return 0.1995 + 0.5002 * x + 0.1994 * x ** 2 - 0.0164 * x ** 3\n\n\ndef chabanne_4(x):\n return 0.1500 + 0.5012 * x + 0.2981 * x ** 2 - 0.0004 * x ** 3 - 0.0388 * x ** 4\n\n\ndef chabanne_5(x):\n return 0.1488 + 0.4993 * x + 0.3007 * x ** 2 + 0.0003 * x ** 3 - 0.0168 * x ** 5\n\n\ndef chabanne_6(x):\n return 0.1249 + 0.5000 * x + 0.3729 * x ** 2 - 0.0410 * x ** 4 + 0.0016 * x ** 6\n\n\ndef d3_v1_pol(x):\n return 0.7 * x ** 3 + 0.8 * x ** 2 + 0.2 * x\n\n\ndef d3_v2_pol(x):\n return -0.4 * x ** 3 + 0.5 * x ** 2 + 0.9 * x\n\n\ndef softplus_integral(x):\n return -0.0005 * x ** 4 + 0.0000 * x ** 3 + 0.0815 * x ** 2 + 0.5000 * x + 0\n\nsoftplus = torch.nn.Softplus()\n\n\ndef custom_softplus(x):\n return x - softplus(x)\n\n\ndef hesam_sigmoid_integral(x):\n return -(x * (225234375 * x ** 3 + 443 * x ** 2 - 843750000000000 * x - 937500000000000000)) / 1875000000000000000\n\n\ndef bounded_step_activation(x):\n return torch.abs(x) * 0.5\n\n\ndef rectified_polynomial(x):\n # Rectifying all negative values first\n x = x.clamp(min=0)\n return d3_v2_pol(x)\n\n\ndef swish(x, beta=1):\n return x * F.sigmoid(beta * x)\n\n\ndef periodic_cos(x):\n return torch.cos(x) - x\n\n\ndef periodic_cos_mod(x):\n return torch.cos(0.2 * x) - (0.2 * x)\n\n\ndef softplus_polynomial(x):\n return -8.043291176102489*10**-14*x**9 -5.409176004846577*10**-11*x**8 +1.464006789445581*10**-10*x**7 +1.2094736421337893*10**-7*x**6 -8.68650047151514*10**-8*x**5 -9.849521136327391*10**-5*x**4 +1.8543655255840298*10**-5*x**3 +0.045459999581864446*x**2 +0.4989722694638288*x +1.1980867140213445\n"
] | [
[
"torch.abs",
"torch.nn.functional.sigmoid",
"torch.nn.Softplus",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
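Most functions in this row are single-expression activations over tensors. An illustrative sketch (not from the repository) exercising the calls listed in `apis`; `torch.sigmoid` is used here as the non-deprecated equivalent of `torch.nn.functional.sigmoid`:

```python
import torch

def swish(x, beta=1):
    # x * sigmoid(beta * x), matching the definition in activations.py
    return x * torch.sigmoid(beta * x)

x = torch.linspace(-3, 3, 7)
print(swish(x))                  # smooth, non-monotone activation
print(torch.abs(x) * 0.5)        # bounded_step_activation
print(torch.cos(x) - x)          # periodic_cos
print(torch.nn.Softplus()(x))    # used by custom_softplus
```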
sangyi92/alphago_zero | [
"6acd092e9b904eb725da031f8c5f8b4e1bdae111"
] | [
"AlphagoZero/mcts.py"
] | [
"\"\"\"Monte Carlo Tree Search, as described in Silver et al 2017.\n\nThis is a \"pure\" implementation of the AlphaGo MCTS algorithm in that it is not specific to the\ngame of Go; everything in this file is implemented generically with respect to some state, actions,\npolicy function, and value function.\n\"\"\"\nimport numpy as np\nfrom operator import itemgetter\n#import sys\n#sys.setrecursionlimit(1500)\n\nclass TreeNode(object):\n \"\"\"A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and\n its visit-count-adjusted prior score u.\n \"\"\"\n\n def __init__(self, parent, prior_p):\n self._parent = parent\n self._children = {} # a map from action to TreeNode\n self._n_visits = 0\n self._W = 0 # This was not in the original code.\n self._Q = 0\n # This value for u will be overwritten in the first call to update(), but is useful for\n # choosing the first action from this node.\n self._u = prior_p\n self._P = prior_p\n\n def printing(self):\n print(self._children)\n for _, child in self._children.iteritems():\n child.printing()\n\n def expand(self, action_priors):\n \"\"\"Expand tree by creating new children.\n\n Arguments:\n action_priors -- output from policy function - a list of tuples of actions and their prior\n probability according to the policy function.\n\n Returns:\n None\n \"\"\"\n for action, prob in action_priors:\n if action not in self._children:\n self._children[action] = TreeNode(self, prob)\n\n def select(self):\n \"\"\"Select action among children that gives maximum action value, Q plus bonus u(P).\n\n Returns:\n A tuple of (action, next_node)\n \"\"\"\n return max(self._children.iteritems(), key=lambda act_node: act_node[1].get_value())\n\n def update(self, leaf_value, c_puct):\n \"\"\"Update node values from leaf evaluation.\n\n Arguments:\n leaf_value -- the value of subtree evaluation from the current player's perspective.\n c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and\n prior probability, P, on this node's score.\n\n Returns:\n None\n \"\"\"\n # Count visit.\n self._n_visits += 1\n # Update W\n self._W += leaf_value\n # Update Q, a running average of values for all visits.\n self._Q = self._W / self._n_visits\n # Update u, the prior weighted by an exploration hyperparameter c_puct and the number of\n # visits. Note that u is not normalized to be a distribution.\n if not self.is_root():\n self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)\n\n def update_recursive(self, leaf_value, c_puct):\n \"\"\"Like a call to update(), but applied recursively for all ancestors.\n\n Note: it is important that this happens from the root downward so that 'parent' visit\n counts are correct.\n \"\"\"\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(leaf_value, c_puct)\n self.update(leaf_value, c_puct)\n\n def get_value(self):\n \"\"\"Calculate and return the value for this node: a combination of leaf evaluations, Q, and\n this node's prior adjusted for its visit count, u\n \"\"\"\n return self._Q + self._u\n\n def is_leaf(self):\n \"\"\"Check if leaf node (i.e. 
no nodes below this have been expanded).\n \"\"\"\n return self._children == {}\n\n def is_root(self):\n return self._parent is None\n\n\nclass MCTS(object):\n \"\"\"A simple (and slow) single-threaded implementation of Monte Carlo Tree Search.\n\n Search works by exploring moves randomly according to the given policy up to a certain\n depth, which is relatively small given the search space. \"Leaves\" at this depth are assigned a\n value by the value function evaluated at that leaf.The probability of revisiting a node changes\n over the course of the many playouts according to its estimated value.\n Ultimately the node which is chosen based on the number of visits is returned as the next action.\n\n The term \"playout\" refers to a single search from the root.\n \"\"\"\n\n def __init__(self, value_fn, policy_fn, c_puct=5, n_playout=1600):\n \"\"\"Arguments:\n value_fn -- a function that takes in a state and ouputs a score in [-1, 1], i.e. the\n expected value of the end game score from the current player's perspective.\n policy_fn -- a function that takes in a state and outputs a list of (action, probability)\n tuples for the current player.\n c_puct -- a number in (0, inf) that controls how quickly exploration converges to the\n maximum-value policy, where a higher value means relying on the prior more, and\n should be used only in conjunction with a large value for n_playout.\n \"\"\"\n self._root = TreeNode(None, 1.0)\n self._value = value_fn\n self._policy = policy_fn\n self._c_puct = c_puct\n self._n_playout = n_playout\n\n def _playout(self, state, self_play):\n \"\"\"Run a single playout from the root to the given depth, getting a value at the leaf and\n propagating it back through its parents. State is modified in-place, so a copy must be\n provided.\n\n Arguments:\n state -- a copy of the state.\n self_play -- whether this is on self_play or not\n\n Returns:\n None\n \"\"\"\n node = self._root\n if not node.is_leaf() and self_play:\n etas = np.random.dirichlet([0.03 for _ in range(len(node._children.items()))],1)[0]\n j = 0\n #a, c = map(list, zip(*node._children.items()))\n #print(\"first\")\n #print(zip(a, [_c._P for _c in c]))\n for action, child_node in node._children.iteritems():\n child_node._P = 0.75*child_node._P + 0.25*etas[j]\n j += 1\n #a, c = map(list, zip(*node._children.items()))\n #print(\"second\")\n #print(zip(a, [_c._P for _c in c]))\n\n while True:\n # Only expand node if it has not already been done. 
Existing nodes already know their\n # prior.\n if node.is_leaf():\n action_probs = self._policy(state)\n # Check for end of game.\n if len(action_probs) == 0:\n break\n if node.is_root() and self_play:\n etas = np.random.dirichlet([0.03 for _ in range(len(action_probs))],1)[0]\n j = 0\n new_action_probs = []\n for action, prob in action_probs:\n prob = 0.75*prob + 0.25*etas[j]\n new_action_probs.append((action, prob))\n j += 1\n action_probs = new_action_probs\n node.expand(action_probs)\n break\n # Greedily select next move.\n action, node = node.select()\n state.do_move(action)\n\n # Evaluate the leaf using value function which is the subnetwork of policy_value network\n leaf_value = self._value(state)\n\n # Update value and visit count of nodes in this traversal.\n node.update_recursive(leaf_value, self._c_puct)\n\n\n def get_move(self, state, temperature, self_play):\n \"\"\"Runs all playouts sequentially and returns the action based on exponentiated visit count.\n\n Arguments:\n state -- the current state, including both game state and the current player.\n\n Returns:\n the selected action\n \"\"\"\n for n in range(self._n_playout):\n state_copy = state.copy()\n self._playout(state_copy, self_play)\n\n # action is chosen proportional to its exponentiated visit count\n if temperature > 0:\n childrens = self._root._children.items()\n actions, next_states = map(list, zip(*childrens))\n exponentiated_n_visits = np.power([next_state._n_visits for next_state in next_states],1./temperature)\n pi = np.divide(exponentiated_n_visits, np.sum(exponentiated_n_visits))\n child_idx = range(len(childrens))\n child_idx = np.random.choice(child_idx, p = pi)\n return actions[child_idx]\n else : # when temperature is infinitesimal\n return max(self._root._children.iteritems(), key=lambda act_node: act_node[1]._n_visits)[0]\n\n def update_with_move(self, last_move):\n \"\"\"Step forward in the tree, keeping everything we already know about the subtree, assuming\n that get_move() has been called already. Siblings of the new root will be garbage-collected.\n \"\"\"\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)\n\n\nclass ParallelMCTS(MCTS):\n pass\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.random.choice",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
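The four `numpy` calls in this row implement two formulas in `mcts.py`: the PUCT exploration bonus u = c_puct * P * sqrt(N_parent) / (1 + N), and temperature-scaled action sampling with pi_i proportional to N_i^(1/T). A minimal sketch with made-up visit counts:

```python
import numpy as np

# PUCT exploration bonus for one child node (TreeNode.update).
c_puct, prior, parent_visits, visits = 5.0, 0.2, 100, 10
u = c_puct * prior * np.sqrt(parent_visits) / (1 + visits)

# Exponentiated-visit-count sampling, as in MCTS.get_move.
visit_counts = np.array([10, 30, 60])
temperature = 1.0
exp_n = np.power(visit_counts, 1.0 / temperature)
pi = np.divide(exp_n, np.sum(exp_n))              # normalize to a distribution
action_idx = np.random.choice(len(visit_counts), p=pi)
```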
EricCousineau-TRI/ravens | [
"d7f9db3214ed730c6d16e5c248684688555c6d23"
] | [
"ravens/tasks/block_insertion.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Ravens Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Insertion Tasks.\"\"\"\n\nimport numpy as np\nfrom ravens.tasks.task import Task\nfrom ravens.utils import utils\n\nimport pybullet as p\n\n\nclass BlockInsertion(Task):\n \"\"\"Insertion Task - Base Variant.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.max_steps = 3\n\n def reset(self, env):\n super().reset(env)\n block_id = self.add_block(env)\n targ_pose = self.add_fixture(env)\n # self.goals.append(\n # ([block_id], [2 * np.pi], [[0]], [targ_pose], 'pose', None, 1.))\n self.goals.append(([(block_id, (2 * np.pi, None))], np.int32([[1]]),\n [targ_pose], False, True, 'pose', None, 1))\n\n def add_block(self, env):\n \"\"\"Add L-shaped block.\"\"\"\n size = (0.1, 0.1, 0.04)\n urdf = 'insertion/ell.urdf'\n pose = self.get_random_pose(env, size)\n return env.add_object(urdf, pose)\n\n def add_fixture(self, env):\n \"\"\"Add L-shaped fixture to place block.\"\"\"\n size = (0.1, 0.1, 0.04)\n urdf = 'insertion/fixture.urdf'\n pose = self.get_random_pose(env, size)\n env.add_object(urdf, pose, 'fixed')\n return pose\n\n\nclass BlockInsertionTranslation(BlockInsertion):\n \"\"\"Insertion Task - Translation Variant.\"\"\"\n\n def get_random_pose(self, env, obj_size):\n pose = super(BlockInsertionTranslation, self).get_random_pose(env, obj_size)\n pos, rot = pose\n rot = utils.eulerXYZ_to_quatXYZW((0, 0, np.pi / 2))\n return pos, rot\n\n # Visualization positions.\n # block_pos = (0.40, -0.15, 0.02)\n # fixture_pos = (0.65, 0.10, 0.02)\n\n\nclass BlockInsertionEasy(BlockInsertionTranslation):\n \"\"\"Insertion Task - Easy Variant.\"\"\"\n\n def add_block(self, env):\n \"\"\"Add L-shaped block in fixed position.\"\"\"\n # size = (0.1, 0.1, 0.04)\n urdf = 'insertion/ell.urdf'\n pose = ((0.5, 0, 0.02), p.getQuaternionFromEuler((0, 0, np.pi / 2)))\n return env.add_object(urdf, pose)\n\n\nclass BlockInsertionSixDof(BlockInsertion):\n \"\"\"Insertion Task - 6DOF Variant.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.sixdof = True\n self.pos_eps = 0.02\n\n def add_fixture(self, env):\n \"\"\"Add L-shaped fixture to place block.\"\"\"\n size = (0.1, 0.1, 0.04)\n urdf = 'insertion/fixture.urdf'\n pose = self.get_random_pose_6dof(env, size)\n env.add_object(urdf, pose, 'fixed')\n return pose\n\n def get_random_pose_6dof(self, env, obj_size):\n pos, rot = super(BlockInsertionSixDof, self).get_random_pose(env, obj_size)\n z = (np.random.rand() / 10) + 0.03\n pos = (pos[0], pos[1], obj_size[2] / 2 + z)\n roll = (np.random.rand() - 0.5) * np.pi / 2\n pitch = (np.random.rand() - 0.5) * np.pi / 2\n yaw = np.random.rand() * 2 * np.pi\n rot = utils.eulerXYZ_to_quatXYZW((roll, pitch, yaw))\n return pos, rot\n\n\nclass BlockInsertionNoFixture(BlockInsertion):\n \"\"\"Insertion Task - No Fixture Variant.\"\"\"\n\n def add_fixture(self, env):\n \"\"\"Add target pose to place block.\"\"\"\n size = (0.1, 0.1, 0.04)\n # urdf = 'insertion/fixture.urdf'\n pose = self.get_random_pose(env, 
size)\n return pose\n\n # def reset(self, env, last_info=None):\n # self.num_steps = 1\n # self.goal = {'places': {}, 'steps': []}\n\n # # Add L-shaped block.\n # block_size = (0.1, 0.1, 0.04)\n # block_urdf = 'insertion/ell.urdf'\n # block_pose = self.get_random_pose(env, block_size)\n # block_id = env.add_object(block_urdf, block_pose)\n # self.goal['steps'].append({block_id: (2 * np.pi, [0])})\n\n # # Add L-shaped target pose, but without actually adding it.\n # if self.goal_cond_testing:\n # assert last_info is not None\n # self.goal['places'][0] = self._get_goal_info(last_info)\n # # print('\\nin insertion reset, goal: {}'.format(self.goal['places'][0]))\n # else:\n # hole_pose = self.get_random_pose(env, block_size)\n # self.goal['places'][0] = hole_pose\n # # print('\\nin insertion reset, goal: {}'.format(hole_pose))\n\n # def _get_goal_info(self, last_info):\n # \"\"\"Used to determine the goal given the last `info` dict.\"\"\"\n # position, rotation, _ = last_info[4] # block ID=4\n # return (position, rotation)\n"
] | [
[
"numpy.int32",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
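The `numpy` usage in this row is the random pose sampling in `get_random_pose_6dof`. A sketch of just that sampling step, illustrative only: the Euler-to-quaternion conversion via `ravens.utils` is omitted, and the size constant is copied from the task definition:

```python
import numpy as np

obj_size = (0.1, 0.1, 0.04)
z = (np.random.rand() / 10) + 0.03            # small random lift above table
pos_z = obj_size[2] / 2 + z
roll = (np.random.rand() - 0.5) * np.pi / 2   # +/- 45 degrees
pitch = (np.random.rand() - 0.5) * np.pi / 2
yaw = np.random.rand() * 2 * np.pi            # full rotation about z
reward_matrix = np.int32([[1]])               # goal bookkeeping dtype
```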
ThomasAldheimerFCG/continued-functions | [
"3285c570cfffd6fa6bc7e5264e843b85602b0487"
] | [
"continued_functions.py"
] | [
"import numpy as np\r\nimport cmath as cm\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nimport os\r\n\r\ncolor_codes = {1: 'red', 2: 'yellow', 3: 'blue', 4: 'green', 5: 'magenta', 6: 'crimson', 7: 'violet', 8: 'gold',\r\n 9: 'palegreen', 10: 'orange', 11: 'skyblue', 12: 'purple', 13: 'aqua', 14: 'pink', 15: 'lime',\r\n 16: 'mistyrose', 0: 'white'}\r\n\r\npatches = {1: mpatches.Patch(color=color_codes[1], label='1 lp'),\r\n 2: mpatches.Patch(color=color_codes[2], label='2 lp'),\r\n 3: mpatches.Patch(color=color_codes[3], label='3 lp'),\r\n 4: mpatches.Patch(color=color_codes[4], label='4 lp'),\r\n 5: mpatches.Patch(color=color_codes[5], label='5 lp'),\r\n 6: mpatches.Patch(color=color_codes[6], label='6 lp'),\r\n 7: mpatches.Patch(color=color_codes[7], label='7 lp'),\r\n 8: mpatches.Patch(color=color_codes[8], label='8 lp'),\r\n 9: mpatches.Patch(color=color_codes[9], label='9 lp'),\r\n 10: mpatches.Patch(color=color_codes[10], label='10 lp'),\r\n 11: mpatches.Patch(color=color_codes[11], label='11 lp'),\r\n 12: mpatches.Patch(color=color_codes[12], label='12 lp'),\r\n 13: mpatches.Patch(color=color_codes[13], label='13 lp'),\r\n 14: mpatches.Patch(color=color_codes[14], label='14 lp'),\r\n 15: mpatches.Patch(color=color_codes[15], label='15 lp'),\r\n 16: mpatches.Patch(color=color_codes[16], label='16 lp'),\r\n 0: mpatches.Patch(color=color_codes[0], label='undef')}\r\n\r\n\r\ndef continued_function(z, n, function):\r\n \"\"\"\r\n Return a sequence of n iterations of a continued function for a value z\r\n \"\"\"\r\n if function == 'exp':\r\n def f(x):\r\n return cm.exp(x*z)\r\n elif function == 'log':\r\n def f(x):\r\n return cm.log(x*z)\r\n elif function == 'sqrt':\r\n def f(x):\r\n return cm.sqrt(x*z)\r\n elif function == 'cube_root':\r\n def f(x):\r\n return (x*z)**(1.0/3.0)\r\n elif function == 'sine':\r\n def f(x):\r\n return cm.sin(x*z)\r\n elif function == 'cosine':\r\n def f(x):\r\n return cm.cos(x*z)\r\n elif function == 'tan':\r\n def f(x):\r\n return cm.tan(x*z)\r\n elif function == 'arcsine':\r\n def f(x):\r\n return cm.asin(pade[k]*z)\r\n elif function == 'arccosine':\r\n def f(x):\r\n return cm.acos(pade[k]*z)\r\n elif function == 'arctan':\r\n def f(x):\r\n return cm.atan(pade[k]*z)\r\n elif function == 'sineh':\r\n def f(x):\r\n return cm.sinh(x*z)\r\n elif function == 'cosineh':\r\n def f(x):\r\n return cm.cosh(x*z)\r\n elif function == 'tanh':\r\n def f(x):\r\n return cm.tanh(x*z)\r\n elif function == 'arcsineh':\r\n def f(x):\r\n return cm.asinh(x*z)\r\n elif function == 'arccosineh':\r\n def f(x):\r\n return cm.acosh(x*z)\r\n elif function == 'arctanh':\r\n def f(x):\r\n return cm.atanh(x*z)\r\n elif function == 'exp_sin':\r\n def f(x):\r\n return cm.exp(cm.sin(x*z))\r\n elif function == 'sin_exp':\r\n def f(x):\r\n return cm.sin(cm.exp(x*z))\r\n elif function == 'log_sin':\r\n def f(x):\r\n return cm.log(cm.sin(x*z))\r\n elif function == 'sin_log':\r\n def f(x):\r\n return cm.sin(cm.log(x*z))\r\n elif function == 'cos_sin':\r\n def f(x):\r\n return cm.cos(cm.sin(x*z))\r\n elif function == 'fraction_1':\r\n def f(x):\r\n return 1/(1+x*z)\r\n elif function == 'fraction_2':\r\n def f(x):\r\n return z/(1+x*z)\r\n else:\r\n raise ValueError(\"Incorrect function: {}\". 
format(function))\r\n pade = [1]\r\n try:\r\n for k in range(n):\r\n z_next = f(pade[k])\r\n pade.append(z_next)\r\n return pade\r\n except (OverflowError, ValueError):\r\n return [0]\r\n\r\n\r\ndef test_limit_points(sequence, max_number_of_lp):\r\n \"\"\"\r\n Return the number (color coded) of limit points in a sequence.\r\n Points within a distance epsilon are indistinguishable.\r\n \"\"\"\r\n if len(sequence) == 1:\r\n return 0 # undefined\r\n epsilon = 1e-10\r\n try:\r\n for i in range(2, max_number_of_lp+2):\r\n if abs(sequence[-1]-sequence[-i]) < epsilon:\r\n return i-1\r\n return 0 # undefined\r\n except (OverflowError, ValueError):\r\n return 0 # undefined\r\n\r\n\r\ndef scatter(function, list_lp, max_number_of_lp, a_re, b_re, a_im, b_im, legend):\r\n \"\"\"\r\n Generate a scatter plot for each number of limit points (each color code).\r\n \"\"\"\r\n fig = plt.figure(figsize=(16, 16), dpi=2000)\r\n ax = fig.add_subplot(111, facecolor=color_codes[0])\r\n\r\n for lp in range(1, max_number_of_lp+1):\r\n x = []\r\n y = []\r\n color = color_codes[lp] # Use predefined dictionary with color codes\r\n for k in range(len(list_lp)):\r\n if list_lp[k][2] == lp:\r\n x.append(list_lp[k][0])\r\n y.append(list_lp[k][1])\r\n ax.scatter(x, y, s=2, color=color, marker=',', lw=0, alpha=1)\r\n ax.set_aspect('equal', 'box')\r\n plt.xlim(a_re, b_re)\r\n plt.ylim(a_im, b_im)\r\n plt.xlabel('Re')\r\n plt.ylabel('Im')\r\n if legend:\r\n list_of_handles = [patches[i] for i in range(max_number_of_lp+1)]\r\n plt.legend(bbox_to_anchor=(1.0, 1.0), handles=list_of_handles)\r\n fig.tight_layout()\r\n if not os.path.exists('output'):\r\n os.makedirs('output')\r\n plt.savefig(\"output/{}_{}_lp.png\".format(function, max_number_of_lp))\r\n\r\n return None\r\n\r\n\r\ndef generate_picture(function, max_number_of_lp):\r\n assert isinstance(max_number_of_lp, int), \"max_number_of_lp must be of type int\"\r\n assert 17 > max_number_of_lp >= 1, \"max_number_of_lp must be 17 > max_number_of_lp >= 1\"\r\n\r\n N = 2000 # num in linspace (\"resolution\" in picture)\r\n n = 800 # number of terms in pade sequence\r\n\r\n a_Re = -4.0 # start value Real axis\r\n b_Re = 4.0 # end value Real axis\r\n Re_x = np.linspace(a_Re, b_Re, N)\r\n\r\n a_Im = -4.0 # start value Imaginary axis\r\n b_Im = 4.0 # end value Imaginary axis\r\n Im_y = np.linspace(a_Im, b_Im, N)\r\n\r\n list_of_limit_points = []\r\n for k in range(N):\r\n for i in range(N):\r\n z = complex(Re_x[i], Im_y[k])\r\n no_of_limit_points = test_limit_points(sequence=continued_function(z=z, n=n, function=function),\r\n max_number_of_lp=max_number_of_lp)\r\n list_of_limit_points.append([Re_x[i], Im_y[k], no_of_limit_points])\r\n\r\n scatter(function=function,\r\n list_lp=list_of_limit_points,\r\n max_number_of_lp=max_number_of_lp,\r\n a_re=a_Re, b_re=b_Re, a_im=a_Im, b_im=b_Im,\r\n legend=False)\r\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
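The core loop in this row iterates f(x) = g(x*z) from x = 1 and then checks the tail of the orbit for periodicity. A self-contained sketch of that limit-point test for the 'sqrt' case (illustrative values; the iteration contracts toward the fixed point x = z, so one limit point is detected):

```python
import cmath as cm

z = complex(0.5, 0.5)
seq = [1]
for k in range(200):
    seq.append(cm.sqrt(seq[k] * z))      # the 'sqrt' continued function

epsilon = 1e-10
print(abs(seq[-1] - seq[-2]) < epsilon)  # True -> 1 limit point (converged)
```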
guanghaoyin/CVRKD-IQA | [
"b596a53c064d5472feb63fc61abe0b100e40606f"
] | [
"models/DCNN_NARIQA.py"
] | [
"import torch\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nclass DCNN_NARIQA(nn.Module):\n def __init__(self):\n super().__init__()\n #ref path\n self.block1_ref = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=4),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=5, stride=1))\n self.block2_ref = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=9, stride=1),\n nn.ReLU())\n self.fc3_ref = nn.Linear(in_features=59168, out_features=1024)\n\n #LQ path\n self.block1_lq = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=4),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=5, stride=1))\n self.block2_lq = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=9, stride=1),\n nn.ReLU())\n self.fc3_lq = nn.Linear(in_features=59168, out_features=1024)\n\n self.fc4 = nn.Linear(in_features=2048, out_features=1024)\n self.fc5 = nn.Linear(in_features=1024, out_features=1)\n \n def forward(self, lq_patches, ref_patches):\n feature_lq = self.block1_lq(lq_patches)\n feature_lq = self.block2_lq(feature_lq)\n feature_lq = self.fc3_lq(feature_lq.view(feature_lq.size(0), -1))\n\n feature_ref = self.block1_ref(ref_patches)\n feature_ref = self.block2_ref(feature_ref)\n feature_ref = self.fc3_ref(feature_ref.view(feature_ref.size(0), -1))\n\n concat_feature = torch.cat((feature_ref, feature_lq), 1)\n concat_feature = self.fc4(concat_feature)\n pred = self.fc5(concat_feature)\n return pred\n\nif __name__ == \"__main__\":\n x = torch.rand((1,3,224,224))\n y = torch.rand((1,3,224,224))\n net = DCNN_NARIQA()\n pred = net(x, y)\n"
] | [
[
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.rand",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
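The magic number `in_features=59168` in this row follows from the layer arithmetic on a 224x224 input: conv k7/s4 gives (224-7)//4+1 = 55, maxpool k5/s1 gives 51, conv k9/s1 gives 43, and 32*43*43 = 59168. A quick, runnable check of that arithmetic (a sketch of one branch only, not the full two-path model):

```python
import torch
import torch.nn as nn

block = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=4),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=5, stride=1),
    nn.Conv2d(64, 32, kernel_size=9, stride=1),
    nn.ReLU(),
)
out = block(torch.rand(1, 3, 224, 224))
print(out.shape)     # torch.Size([1, 32, 43, 43])
print(32 * 43 * 43)  # 59168 -> matches nn.Linear(in_features=59168, ...)
```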
Shen0000/Huggingface-DeBERTa | [
"b8553e57d722c3364ba5f940e7e8a5c95f13880d"
] | [
"examples/tensorflow/multiple-choice/run_swag.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for multiple choice.\n\"\"\"\n# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Optional\n\nimport datasets\nimport numpy as np\nimport tensorflow as tf\nfrom datasets import load_dataset\n\nimport transformers\nfrom transformers import (\n CONFIG_NAME,\n TF2_WEIGHTS_NAME,\n AutoConfig,\n AutoTokenizer,\n HfArgumentParser,\n TFAutoModelForMultipleChoice,\n TFTrainingArguments,\n create_optimizer,\n set_seed,\n)\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.15.0\")\n\nlogger = logging.getLogger(__name__)\n\n\n# region Helper classes and functions\nclass SavePretrainedCallback(tf.keras.callbacks.Callback):\n # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary\n # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback\n # that saves the model with this method after each epoch.\n def __init__(self, output_dir, **kwargs):\n super().__init__()\n self.output_dir = output_dir\n\n def on_epoch_end(self, epoch, logs=None):\n self.model.save_pretrained(self.output_dir)\n\n\ndef convert_dataset_for_tensorflow(\n dataset, non_label_column_names, batch_size, dataset_mode=\"variable_batch\", shuffle=True, drop_remainder=True\n):\n \"\"\"Converts a Hugging Face dataset to a Tensorflow Dataset. The dataset_mode controls whether we pad all batches\n to the maximum sequence length, or whether we only pad to the maximum length within that batch. 
The former\n is most useful when training on TPU, as a new graph compilation is required for each sequence length.\n \"\"\"\n\n def densify_ragged_batch(features, label=None):\n features = {\n feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()\n }\n if label is None:\n return features\n else:\n return features, label\n\n feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + [\"label\"]))\n if dataset_mode == \"variable_batch\":\n batch_shape = {key: None for key in feature_keys}\n data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}\n elif dataset_mode == \"constant_batch\":\n data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}\n batch_shape = {\n key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)\n for key, ragged_tensor in data.items()\n }\n else:\n raise ValueError(\"Unknown dataset mode!\")\n\n if \"label\" in dataset.features:\n labels = tf.convert_to_tensor(np.array(dataset[\"label\"]))\n tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))\n else:\n tf_dataset = tf.data.Dataset.from_tensor_slices(data)\n if shuffle:\n tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF\n tf_dataset = (\n tf_dataset.with_options(options)\n .batch(batch_size=batch_size, drop_remainder=drop_remainder)\n .map(densify_ragged_batch)\n )\n return tf_dataset\n\n\n# endregion\n\n# region Arguments\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a text file).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of 
processes to use for the preprocessing.\"},\n )\n max_seq_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. If passed, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to the maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n\n def __post_init__(self):\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n\n# endregion\n\n\ndef main():\n # region Argument parsing\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n output_dir = Path(training_args.output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n # endregion\n\n # region Logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n # endregion\n\n # region Checkpoints\n checkpoint = None\n if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:\n if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():\n checkpoint = output_dir\n logger.info(\n f\"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this\"\n \" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n else:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. 
\"\n \"Use --overwrite_output_dir to continue regardless.\"\n )\n # endregion\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # region Load datasets\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n # 'text' is found. You can easily tweak this behavior (see below).\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.train_file is not None or data_args.validation_file is not None:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.train_file.split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)\n else:\n # Downloading and loading the swag dataset from the hub.\n raw_datasets = load_dataset(\"swag\", \"regular\", cache_dir=model_args.cache_dir)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # When using your own dataset or a different dataset from swag, you will probably need to change this.\n ending_names = [f\"ending{i}\" for i in range(4)]\n context_name = \"sent1\"\n question_header_name = \"sent2\"\n # endregion\n\n # region Load model config and tokenizer\n if checkpoint is not None:\n config_path = training_args.output_dir\n elif model_args.config_name:\n config_path = model_args.config_name\n else:\n config_path = model_args.model_name_or_path\n\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n config_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # endregion\n\n # region Dataset preprocessing\n if data_args.max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n if max_seq_length > 1024:\n logger.warning(\n f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n )\n max_seq_length = 1024\n else:\n if data_args.max_seq_length > tokenizer.model_max_length:\n logger.warning(\n f\"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the\"\n f\"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.\"\n )\n max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)\n\n def preprocess_function(examples):\n first_sentences = [[context] * 4 for context in examples[context_name]]\n question_headers = examples[question_header_name]\n second_sentences = [\n [f\"{header} {examples[end][i]}\" for end in ending_names] for i, header in enumerate(question_headers)\n ]\n\n # Flatten out\n first_sentences = list(chain(*first_sentences))\n second_sentences = list(chain(*second_sentences))\n\n # Tokenize\n tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)\n # Un-flatten\n data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}\n return data\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n train_dataset = raw_datasets[\"train\"]\n non_label_columns = [feature for feature in train_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if not training_args.do_train:\n non_label_columns = [feature for feature in eval_dataset.features if feature not in (\"label\", \"labels\")]\n if data_args.max_eval_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n # endregion\n\n with training_args.strategy.scope():\n # region Build model\n if checkpoint is None:\n model_path = model_args.model_name_or_path\n else:\n model_path = checkpoint\n model = TFAutoModelForMultipleChoice.from_pretrained(\n model_path,\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n num_replicas = training_args.strategy.num_replicas_in_sync\n total_train_batch_size = training_args.per_device_train_batch_size * num_replicas\n total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas\n if training_args.do_train:\n total_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)\n optimizer, lr_schedule = create_optimizer(\n init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0\n )\n else:\n optimizer = \"adam\" # Just put anything in here, since we're not using it anyway\n model.compile(\n optimizer=optimizer,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name=\"accuracy\")],\n )\n # endregion\n\n # region Training\n if training_args.do_train:\n tf_train_dataset = convert_dataset_for_tensorflow(\n train_dataset, 
non_label_column_names=non_label_columns, batch_size=total_train_batch_size\n )\n if training_args.do_eval:\n validation_data = convert_dataset_for_tensorflow(\n eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size\n )\n else:\n validation_data = None\n model.fit(\n tf_train_dataset,\n validation_data=validation_data,\n epochs=int(training_args.num_train_epochs),\n callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)],\n )\n # endregion\n\n # region Evaluation\n if training_args.do_eval and not training_args.do_train:\n # Do a standalone evaluation pass\n tf_eval_dataset = convert_dataset_for_tensorflow(\n eval_dataset, non_label_column_names=non_label_columns, batch_size=total_eval_batch_size\n )\n model.evaluate(tf_eval_dataset)\n # endregion\n\n # region Push to hub\n if training_args.push_to_hub:\n model.push_to_hub(\n finetuned_from=model_args.model_name_or_path,\n tasks=\"multiple-choice\",\n dataset_tags=\"swag\",\n dataset_args=\"regular\",\n dataset=\"SWAG\",\n language=\"en\",\n )\n # endregion\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.ragged.constant",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.data.Options",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
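The ragged-tensor calls in this row exist to pad variable-length token sequences per batch. An illustrative sketch (made-up toy data, not from the script) of the two primitives it relies on: `to_tensor()` to densify with zero padding and `bounding_shape()` to size constant batches:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [4, 5], [6]])
print(rt.to_tensor())       # [[1 2 3] [4 5 0] [6 0 0]], zero padded
print(rt.bounding_shape())  # [3 3], used to fix shapes in constant_batch mode
```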
sgraetzer/HearingLossSimulator | [
"26fc199b95b10fa97b7fda643595f1d37c419b33"
] | [
"hearinglosssimulator/pyacqnodes.py"
] | [
"from pyqtgraph.Qt import QtCore\nimport pyqtgraph as pg\nimport numpy as np\n\ntry:\n import pyacq\n HAS_PYACQ = True\nexcept ImportError:\n HAS_PYACQ = False\n\n\n\n\nclass Mutex(QtCore.QMutex):\n def __exit__(self, *args):\n self.unlock()\n\n def __enter__(self):\n self.lock()\n return self \n\nclass NodeThread(pyacq.ThreadPollInput):\n def __init__(self, input_stream, output_stream, proccesing_func, timeout = 200, parent = None):\n pyacq.ThreadPollInput.__init__(self, input_stream, timeout = timeout, return_data=True, parent = parent)\n self.output_stream = output_stream\n self.proccesing_func = proccesing_func\n\n def process_data(self, pos, data):\n pos2, processed_data = self.proccesing_func(pos, data)\n if pos2 is not None:\n self.output_stream.send(processed_data, index=pos2)\n\nclass BaseProcessingNode(pyacq.Node, QtCore.QObject):\n _input_specs = {'signals' : dict(streamtype = 'signals')}\n _output_specs = {'signals' : dict(streamtype = 'signals')}\n \n def __init__(self, parent = None, **kargs):\n QtCore.QObject.__init__(self, parent)\n pyacq.Node.__init__(self, **kargs)\n\n \n def after_input_connect(self, inputname):\n # this automatically propagate 'sample_rate', 'dtype', 'shape'\n # to output spec\n # in case of a Node that change sample_rate or the number of channel \n # this must be overwirtten\n self.nb_channel = self.input.params['shape'][1]\n for k in ['sample_rate', 'dtype', 'shape']:\n self.outputs['signals'].spec[k] = self.input.params[k]\n \n \n def _initialize(self):\n print('_initialize')\n self.thread = NodeThread(self.input, self.outputs['signals'], self.proccesing_func)\n \n def _start(self):\n self.thread.last_pos = None\n self.thread.start()\n \n def _stop(self):\n self.thread.stop()\n self.thread.wait()\n \n def proccesing_func(self, pos, data):\n raise(NotImplementedError)\n \n\n\n\nclass BasePyacqNode(BaseProcessingNode):\n _processing_class = None\n\n \n def _configure(self, **params):\n \"\"\"\n Parameters for configure\n -------\n coefficients: per channel sos filter coefficient shape (nb_channel, nb_section, 6)\n \n \"\"\"\n \n self.params = params\n self.debug_mode = self.params.get('debug_mode', False)\n \n\n\n \n def after_input_connect(self, inputname):\n BaseProcessingNode.after_input_connect(self, inputname)\n\n self.nb_channel = self.input.params['shape'][1]\n self.sample_rate = self.input.params['sample_rate']\n self.dtype = self.input.params['dtype']\n \n # maybe patch here to test if harware support float64\n assert self.input.params['dtype'] == np.dtype('float32')\n\n \n \n for k in ['sample_rate', 'dtype', 'shape']:\n self.outputs['signals'].spec[k] = self.input.params[k]\n \n total_channel = self.params['nb_freq_band']*self.nb_channel\n \n if self.debug_mode:\n steps = ['pgc1', 'levels', 'hpaf', 'pgc2', 'passive']\n for step in steps:\n self._output_specs[step] = dict(streamtype='signals', shape=(-1,total_channel),\n sample_rate=self.sample_rate)\n self.outputs = {name:pyacq.OutputStream(spec=spec, node=self, name=name) for name, spec in self._output_specs.items()}\n \n \n def _initialize(self):\n BaseProcessingNode._initialize(self)\n self.mutex = Mutex()\n self.processing = self._processing_class(nb_channel=self.nb_channel, sample_rate=self.sample_rate, dtype='float32',\n apply_configuration_at_init=True, **self.params )\n \n \n def set_bypass(self, bypass):\n with self.mutex:\n self.processing.bypass = bypass\n \n \n def proccesing_func(self, pos, data):\n with self.mutex:\n returns = self.processing.proccesing_func(pos, data)\n \n if 
self.processing.debug_mode:\n \n for k, (pos, chunk) in returns.items():\n if k =='main_output':\n continue\n self.outputs[k].send(chunk, index=pos)\n \n return returns['main_output']\n \n\nfrom .invcgc import InvCGC\n\nclass HLSNode(BasePyacqNode):\n _processing_class = InvComp\n\n def online_configure(self, **params):\n \n self.params = params\n self.debug_mode = self.params.get('debug_mode', False)\n \n print(params)\n t0 = time.perf_counter()\n self.processing.configure(**params)\n t1 = time.perf_counter()\n print(t1-t0)\n self.processing._load_or_make_filters()\n t2 = time.perf_counter()\n print(t2-t1)\n with self.mutex: \n self.processing.initlalize_cl()\n t3 = time.perf_counter()\n print(t3-t2)\n print(t3-t0)\n \n\n"
] | [
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
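The single `numpy` call tracked for this row guards the stream dtype before the DSP chain is configured. A trivial sketch of that check with a stand-in buffer (illustrative only):

```python
import numpy as np

buf = np.zeros((256, 2), dtype='float32')  # stand-in audio chunk
assert buf.dtype == np.dtype('float32')    # float32 input requirement
```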
andrewcistola/fracture-proof | [
"d67f8f4f688fe4168d7eae3a3f991463ee112324"
] | [
"version_2/v2-1/mrfp_code.py"
] | [
"# Information\nname = 'mrfpsctwc_fldm2_code' # Inptu file name with topic, subtopic, and type\npath = 'fracture-proof/version_2/v2-1/' # Input relative path to file \ndirectory = '/home/drewc/GitHub/' # Input working directory\ntitle = 'FractureProof v2.1 - Mr. Fracture Proofs Contemplative Woodcarving for Diabetes Mortality in Florida' # Input descriptive title\nauthor = 'Andrew S. Cistola, MPH' # Input Author\n\n## Setup Workspace\n\n### Import python libraries\nimport os # Operating system navigation\nfrom datetime import datetime\nfrom datetime import date\n\n### Import data science libraries\nimport pandas as pd # Widely used data manipulation library with R/Excel like tables named 'data frames'\nimport numpy as np # Widely used matrix library for numerical processes\n\n### Import statistics libraries\nimport scipy.stats as st # Statistics package best for t-test, ChiSq, correlation\nimport statsmodels.api as sm # Statistics package best for regression models\n\n### Import Visualization Libraries\nimport matplotlib.pyplot as plt # Comprehensive graphing package in python\nimport geopandas as gp # Simple mapping library for csv shape files with pandas like syntax for creating plots using matplotlib \n\n### Import scikit-learn libraries\nfrom sklearn.preprocessing import StandardScaler # Standard scaling for easier use of machine learning algorithms\nfrom sklearn.impute import SimpleImputer # Univariate imputation for missing data\nfrom sklearn.decomposition import PCA # Principal compnents analysis from sklearn\nfrom sklearn.ensemble import RandomForestRegressor # Random Forest regression component\nfrom sklearn.feature_selection import RFECV # Recursive Feature elimination with cross validation\nfrom sklearn.svm import LinearSVC # Linear Support Vector Classification from sklearn\nfrom sklearn.linear_model import LinearRegression # Used for machine learning with quantitative outcome\nfrom sklearn.linear_model import LogisticRegression # Used for machine learning with quantitative outcome\nfrom sklearn.model_selection import train_test_split # train test split function for validation\nfrom sklearn.metrics import roc_curve # Reciever operator curve\nfrom sklearn.metrics import auc # Area under the curve \n\n### Import PySAL Libraries\nimport libpysal as ps # Spatial data science modeling tools in python\nfrom mgwr.gwr import GWR, MGWR # Geographic weighted regression modeling tools\nfrom mgwr.sel_bw import Sel_BW # Bandwidth selection for GWR\n\n### Import keras libraries\nfrom keras.models import Sequential # Uses a simple method for building layers in MLPs\nfrom keras.models import Model # Uses a more complex method for building layers in deeper networks\nfrom keras.layers import Dense # Used for creating dense fully connected layers\nfrom keras.layers import Input # Used for designating input layers\n\n### Set Directory\nos.chdir(directory) # Set wd to project repository\n\n### Set Timestamps\nday = str(date.today())\nstamp = str(datetime.now())\n\n### Write corresponding text file for collecting results\ntext_file = open(path + name + '_' + day + '.txt', 'w') # Write new corresponding text file\ntext_file.write('####################' + '\\n\\n') # Add section break for end of step\ntext_file.write('Title: ' + title + '\\n') # Script title\ntext_file.write('Author: ' + author + '\\n') # Script Author\ntext_file.write('Filename: ' + name + '.py' + '\\n') # Filename of script\ntext_file.write('Realtive Path: ' + path + '\\n') # Relative path to script\ntext_file.write('Working 
Directory: ' + directory + '\\n') # Directory used for script run\ntext_file.write('Time Run: ' + stamp + '\\n') # Timestamp of script run\ntext_file.write('\\n' + '####################' + '\\n\\n') # Add section break for end of step\ntext_file.close() # Close file\n\n## Step 1: Data Processing of Predictors and Outcomes\ns1 = 'Step 1: Raw Data Processing and Feature Engineering' # Step 1 descriptive title\nd1 = 'Florida Deaprtment of Health Vital Statistics 113 Leading Mortality Causes 2014-2018 Zip Code 5-year Average' # Dataset 1 descriptive title\nd2 = 'US Census American Community Survey 2014-2018 Zip Code 5-year Average' # Dataset 2 descriptive title\n\n### Preprocess First Dataset\ndf_d1 = pd.read_csv('fracture-proof/version_2/_data/FDOH_5Y2018_ZCTA.csv') # Import first dataset saved as csv in _data folder\ndf_d1 = df_d1[df_d1['POPULATION'] > 500] # Susbet numeric column by condition\ndf_d1 = df_d1.filter(['K00_K99_R1000', 'ZCTA']) # Drop or filter columns to keep only feature values and idenitifer\ndf_d1 = df_d1.rename(columns = {'ZCTA': 'ID', 'K00_K99_R1000': 'quant'}) # Apply standard name to identifier and quantitative outcome\ndf_d1.info() # Get class, memory, and column info: names, data types, obs\n\n### Preprocess Second Dataset\ndf_d2 = pd.read_csv('fracture-proof/version_2/_data/ACS_5Y2018_ZCTA.csv') # Import second dataset saved as csv in _data folder\ndf_d2 = df_d2.drop(columns = ['ST', 'FIPS']) # Drop or filter columns to keep only feature values and idenitifer\ndf_d2 = df_d2.select_dtypes(exclude = ['int64']) # Drop all unwanted data types\ndf_d2 = df_d2.rename(columns = {'ZCTA': 'ID'}) # Apply standard name to identifier used for joining datasets\ndf_d2.info() # Get class, memory, and column info: names, data types, obs.\n\n### Join Datasets by ID and define targets\ndf_XY = pd.merge(df_d1, df_d2, on = 'ID', how = 'inner') # Join datasets to create table with predictors and outcome\ndf_XY = df_XY.dropna(subset = ['quant']) # Drop all outcome rows with NA values\ndf_XY.info() # Get class, memory, and column info: names, data types, obs.\n\n### Create outcome table\ndf_Y = df_XY.filter(['quant', 'ID']) # Create Outcome table\ndf_Y = df_Y.set_index('ID') # Set identifier as index\ndf_Y.info() # Get class, memory, and column info: names, data types, obs.\n\n### Create standard scaled predictor table\ndf_X = df_XY.drop(columns = ['quant', 'ID']) # Drop Unwanted Columns\ndf_X = df_X.replace([np.inf, -np.inf], np.nan) # Replace infitite values with NA\ndf_X = df_X.dropna(axis = 1, thresh = 0.75*len(df_X)) # Drop features less than 75% non-NA count for all columns\ndf_X = pd.DataFrame(SimpleImputer(strategy = 'median').fit_transform(df_X), columns = df_X.columns) # Impute missing data\ndf_X = pd.DataFrame(StandardScaler().fit_transform(df_X.values), columns = df_X.columns) # Standard scale values by converting the normalized features into a tabular format with the help of DataFrame.\ndf_X['ID'] = df_XY['ID'] # Save ID as column in predictor table\ndf_X = df_X.set_index('ID') # Set identifier as index\ndf_X.info() # Get class, memory, and column info: names, data types, obs.\n\n### Add feature labels\ndf_l1 = pd.read_csv('fracture-proof/version_2/_data/ACS_5Y2018_labels.csv') # Import feature lables for first dataset saved as csv in _data folder\ndf_l2 = pd.read_csv('fracture-proof/version_2/_data/FDOH_5Y2018_labels.csv') # Import feature lables for second dataset saved as csv in _data folder\ndf_l1_l2 = pd.concat([df_l1, df_l2]) # Combine rows with same columns\ndf_l1_l2 = 
df_l1_l2.filter(['Feature', 'Label']) # Keep only selected columns\ndf_l1_l2 = df_l1_l2.set_index('Feature') # Set column as index\ndf_l1_l2 = df_l1_l2.transpose() # Switch rows and columns\ndf_l1_l2.info() # Get class, memory, and column info: names, data types, obs.\n\n### Append step 1 results to corresponding text file\ntext_file = open(path + name + '_' + day + '.txt', 'a') # Open corresponding text file\ntext_file.write(s1 + '\\n\\n') # Step description\ntext_file.write(d1 + '\\n') # Dataset description\ntext_file.write(d2 + '\\n\\n') # Dataset description\ntext_file.write('Target labels: quant = Diabetes Related (K00-K99) Raw Mortality Rate per 1000k' + '\\n') # Dataset methods description\ntext_file.write('Target processing: None' + '\\n\\n') # Dataset methods description\ntext_file.write(str(df_Y.describe()) + '\\n\\n') # Result descriptive statistics for target\ntext_file.write('Feature labels: ACS Percent Estimates' + '\\n') # Result description\ntext_file.write('Feature processing: 75% nonNA, Median Imputed NA, Standard Scaled' + '\\n\\n') # Dataset methods description\ntext_file.write('Rows, Columns: ' + str(df_X.shape) + '\\n\\n') # Result description and result dataframe\ntext_file.write('####################' + '\\n\\n') # Add section break for end of step\ntext_file.close() # Close file\n\n## Step 2: Identify Predictors\ns2 = \"Step 2: Identify Predictors with Open Models\" # Step 2 descriptive title\nm1 = \"Principal Component Analysis\" # Model 1 descriptive title\nm2 = \"Random Forests\" # Model 2 descriptive title\nm3 = \"Recursive Feature Elimination\" # Model 3 descriptive title\n\n### Principal Component Analysis\ndegree = len(df_X.columns) - 1 # Save number of features -1 to get degrees of freedom\npca = PCA(n_components = degree) # Pass the number of components to make PCA model based on degrees of freedom\npca.fit(df_X) # Fit initial PCA model\ndf_comp = pd.DataFrame(pca.explained_variance_) # Print explained variance of components\ndf_comp = df_comp[(df_comp[0] > 1)] # Save eigenvalues above 1 to identify components\ncomponents = len(df_comp.index) - 1 # Save count of components for variable reduction\npca = PCA(n_components = components) # Pass the selected number of components to make final PCA model\npca.fit_transform(df_X) # Call fit_transform on the aggregate data to create PCA results object\ndf_pc = pd.DataFrame(pca.components_, columns = df_X.columns) # Export eigenvectors to data frame with column names from original data\ndf_pc[\"Variance\"] = pca.explained_variance_ratio_ # Save eigenvalues as their own column\ndf_pc = df_pc[df_pc[\"Variance\"] > df_pc[\"Variance\"].mean()] # Subset by eigenvalues with above average explained variance ratio\ndf_pc = df_pc.abs() # Get absolute value of eigenvalues\ndf_pc = df_pc.drop(columns = [\"Variance\"]) # Drop outcomes and targets\ndf_p = pd.DataFrame(df_pc.max(), columns = [\"MaxEV\"]) # Select maximum eigenvector for each feature\ndf_p = df_p[df_p.MaxEV > df_p.MaxEV.mean()] # Subset by above average max eigenvalues \ndf_p = df_p.reset_index() # Add a new index of ascending values, existing index consisting of feature labels becomes column named \"index\"\ndf_pca = df_p.rename(columns = {\"index\": \"Feature\"}) # Rename former index as features\ndf_pca = df_pca.sort_values(by = [\"MaxEV\"], ascending = False) # Sort Columns by Value\ndf_pca.info() # Get class, memory, and column info: names, data types, obs.\n\n### Random Forest Regressor\nforest = RandomForestRegressor(n_estimators = 1000, max_depth = 10) # Use 
default values except for number of trees. For a further explanation see readme included in repository. \nforest.fit(df_X, df_Y['quant']) # Fit Forest model; this will take time\nrf = forest.feature_importances_ # Output importances of features\nl_rf = list(zip(df_X, rf)) # Create list of variables alongside importance scores \ndf_rf = pd.DataFrame(l_rf, columns = ['Feature', 'Gini']) # Create data frame of importances with Feature and Gini column names\ndf_rf = df_rf[(df_rf['Gini'] > df_rf['Gini'].mean())] # Subset by Gini values higher than mean\ndf_rf = df_rf.sort_values(by = ['Gini'], ascending = False) # Sort Columns by Value\ndf_rf.info() # Get class, memory, and column info: names, data types, obs.\n\n### Fracture: Join RF and PCA \ndf_fr = pd.merge(df_pca, df_rf, on = 'Feature', how = 'inner') # Join by column while keeping only items that exist in both, select outer or left for other options\nfracture = df_fr['Feature'].tolist() # Save features from data frame\ndf_fr.info() # Get class, memory, and column info: names, data types, obs.\n\n### Recursive Feature Elimination\nrecursive = RFECV(estimator = LinearRegression(), min_features_to_select = 5) # Define selection parameters; keep at least 5 features. See Readme for more info\nrecursive.fit(df_X[fracture], df_Y['quant']) # This will take time\nrfe = recursive.support_ # Save Boolean values as numpy array\nl_rfe = list(zip(df_X[fracture], rfe)) # Create list of variables alongside RFE value \ndf_rfe = pd.DataFrame(l_rfe, columns = ['Feature', 'RFE']) # Create data frame of results with Feature and RFE column names\ndf_rfe = df_rfe.sort_values(by = ['RFE'], ascending = True) # Sort Columns by Value\ndf_rfe = df_rfe[df_rfe['RFE'] == True] # Select Variables that were True\ndf_rfe.info() # Get class, memory, and column info: names, data types, obs.\n\n### FractureProof: Join RFE with Fracture\ndf_fp = pd.merge(df_fr, df_rfe, on = 'Feature', how = 'inner') # Join by column while keeping only items that exist in both, select outer or left for other options\nfractureproof = df_fp['Feature'].tolist() # Save chosen features as list\ndf_fp.info() # Get class, memory, and column info: names, data types, obs.\n\n### Get FractureProof feature labels\ndf_lfp = df_l1_l2[fractureproof] # Subset label table by chosen features\ndf_lfp = df_lfp.transpose() # Switch rows and columns\ndf_lfp = df_lfp.reset_index() # Reset index\nl_lfp = list(zip(df_lfp['Feature'], df_lfp['Label'])) # Create list of features alongside their labels \ndf_lfp.info() # Get class, memory, and column info: names, data types, obs.\n\n### Append step 2 results to corresponding text file\ntext_file = open(path + name + '_' + day + '.txt', 'a') # Open corresponding text file\ntext_file.write(s2 + '\\n\\n') # Step description\ntext_file.write('Models: ' + m1 + ', ' + m2 + ', ' + m3 + '\\n\\n') # Model description\ntext_file.write('Values: Eigenvectors, Gini Impurity, Boolean' + '\\n') # Model methods description\ntext_file.write('Thresholds: Mean, Mean, Cross Validation' + '\\n\\n') # Model methods description\ntext_file.write(str(df_fp) + '\\n\\n') # Result dataframe\ntext_file.write(\"Final list of selected features\" + \"\\n\") # Result description\ntext_file.write(str(l_lfp) + '\\n\\n') # Result list\ntext_file.write('####################' + '\\n\\n') # Add section break for end of step\ntext_file.close() # Close file\n\n## Step 3: Create Informative Prediction Model\ns3 = 'Step 3: Create Informative Prediction Model' # Step 3 descriptive title\nm4 = 'Multiple 
Linear Regression Model' # Model 4 descriptive title\n\n### Add confounders to multiple regression model\nmrfractureproof = df_X[fractureproof].columns.to_list() # Save list of selected variables for multiple regression model\nmrfractureproof.append('quant') # Add outcome to list of selected variables for multiple regression model\nmrfractureproof.append('DP05_0024PE') # Add confounder (Over 65) to list of selected variables for multiple regression model\n\n### Create Multiple Regression Model\ndf_mrfp = df_XY[mrfractureproof] # Subset original nonscaled data for regression\ndf_mrfp = df_mrfp.dropna() # Drop all rows with NA values\nX = df_mrfp.drop(columns = ['quant']) # Subset predictors for regression\nY = df_mrfp['quant'] # Subset quantitative outcome for regression\nmod = sm.OLS(Y, X) # Create linear regression model\nres = mod.fit() # Fit model to create result\nres.summary() # Print results of regression model\n\n### Add feature labels\nmrfractureproof.remove('quant') # Remove outcome from list of features used for collecting labels\ndf_lmrfp = df_l1_l2[mrfractureproof] # Subset label table by selected features\nmrfractureproof.append('quant') # Add outcome back to list of selected variables for multiple regression model\ndf_lmrfp = df_lmrfp.transpose() # Switch rows and columns\ndf_lmrfp = df_lmrfp.reset_index() # Reset index\nl_lmrfp = list(zip(df_lmrfp['Feature'], df_lmrfp['Label'])) # Create list of features alongside their labels \ndf_lmrfp.info() # Get class, memory, and column info: names, data types, obs.\n\n### Append step 3 results to corresponding text file\ntext_file = open(path + name + '_' + day + '.txt', 'a') # Open corresponding text file \ntext_file.write(s3 + '\\n\\n') # Step title\ntext_file.write('Models: ' + m4 + '\\n\\n') # Model description\ntext_file.write(str(res.summary()) + '\\n\\n') # Result summary\ntext_file.write(str(l_lmrfp) + '\\n\\n') # Result list\ntext_file.write('####################' + '\\n\\n') # Add section break for end of step\ntext_file.close() # Close file"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.merge",
"pandas.read_csv",
"pandas.concat",
"sklearn.impute.SimpleImputer",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.PCA"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
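A minimal, self-contained sketch of the three-model reduction chain used in the script above (PCA eigenvector screen, Random Forest Gini screen, then RFECV on the intersection), run on synthetic data. Every name, threshold, and size below is illustrative, not taken from the source repository.

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(200, 20)), columns=[f"x{i}" for i in range(20)])
y = 2 * X["x0"] + X["x1"] - X["x2"] + rng.normal(scale=0.1, size=200)

# PCA screen: keep features whose largest absolute loading is above average
pca = PCA(n_components=10).fit(X)
loadings = pd.DataFrame(np.abs(pca.components_), columns=X.columns)
max_ev = loadings.max()
pca_keep = max_ev[max_ev > max_ev.mean()].index.tolist()

# Random Forest screen: keep features with above-average Gini importance
forest = RandomForestRegressor(n_estimators=100, max_depth=10, random_state=0).fit(X, y)
gini = pd.Series(forest.feature_importances_, index=X.columns)
rf_keep = gini[gini > gini.mean()].index.tolist()

# "Fracture": intersect the two screens, then run RFECV on the survivors
# (fall back to the RF picks if the intersection happens to be empty)
fracture = sorted(set(pca_keep) & set(rf_keep)) or rf_keep
rfe = RFECV(estimator=LinearRegression(), min_features_to_select=2).fit(X[fracture], y)
print([f for f, kept in zip(fracture, rfe.support_) if kept])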
DarthThomas/vedadet | [
"6c50f7b497d9c24c391539b41498551cc20940a6"
] | [
"vedadet/assembler/trainval.py"
] | [
"import torch\n\nfrom vedacore.hooks import HookPool\nfrom vedacore.loopers import EpochBasedLooper\nfrom vedacore.parallel import MMDataParallel, MMDistributedDataParallel\nfrom vedadet.datasets import build_dataloader, build_dataset\nfrom vedadet.engines import build_engine\n\n\ndef trainval(cfg, distributed, logger):\n\n for mode in cfg.modes:\n assert mode in ('train', 'val')\n\n dataloaders = dict()\n engines = dict()\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n if 'train' in cfg.modes:\n dataset = build_dataset(cfg.data.train)\n\n dataloaders['train'] = build_dataloader(\n dataset,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n dist=distributed,\n seed=cfg.get('seed', None))\n engine = build_engine(cfg.train_engine)\n\n if distributed:\n engine = MMDistributedDataParallel(\n engine.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n engine = MMDataParallel(\n engine.cuda(), device_ids=[torch.cuda.current_device()])\n\n engines['train'] = engine\n\n if 'val' in cfg.modes:\n dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n\n dataloaders['val'] = build_dataloader(\n dataset,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n engine = build_engine(cfg.val_engine)\n if distributed:\n engine = MMDistributedDataParallel(\n engine.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n engine = MMDataParallel(\n engine.cuda(), device_ids=[torch.cuda.current_device()])\n engines['val'] = engine\n\n hook_pool = HookPool(cfg.hooks, cfg.modes, logger)\n\n looper = EpochBasedLooper(cfg.modes, dataloaders, engines, hook_pool,\n logger, cfg.workdir)\n\n if isinstance(looper, EpochBasedLooper):\n looper.hook_pool.register_hook(dict(typename='WorkerInitHook'))\n if distributed:\n looper.hook_pool.register_hook(\n dict(typename='DistSamplerSeedHook'))\n\n if 'weights' in cfg:\n looper.load_weights(**cfg.weights)\n if 'train' in cfg.modes:\n if 'optimizer' in cfg:\n looper.load_optimizer(**cfg.optimizer)\n if 'meta' in cfg:\n looper.load_meta(**cfg.meta)\n else:\n if 'optimizer' in cfg:\n logger.warning('optimizer is not needed in train mode')\n if 'meta' in cfg:\n logger.warning('meta is not needed in train mode')\n looper.start(cfg.max_epochs)\n"
] | [
[
"torch.cuda.current_device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
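The engine wrapping in trainval() follows a common PyTorch pattern: DistributedDataParallel per process when training is distributed, otherwise DataParallel on the current device. A plain-torch sketch of that choice (the MM* wrappers above come from vedacore; this version uses only torch.nn and assumes torch.distributed.init_process_group() has already been called when distributed=True):

import torch
import torch.nn as nn

def wrap_model(model: nn.Module, distributed: bool) -> nn.Module:
    if not torch.cuda.is_available():
        return model  # CPU fallback, for this sketch only
    model = model.cuda()
    if distributed:
        # One process per GPU; requires an initialized process group
        return nn.parallel.DistributedDataParallel(
            model,
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
    return nn.DataParallel(model, device_ids=[torch.cuda.current_device()])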
jetyey/Texte | [
"933121f8a79af0db7f7e7fd5d56685a18cd45423"
] | [
"data/util.py"
] | [
"import numpy as np\nimport pandas as pd\n\ndef data_set(path=\"./data/stanfordSentimentTreebank\", drop_neutral=True, cut_off=3, binary=False):\n \"\"\"\n\n :param str path: path to the `stanfordSentimentTreebank`\n :param bool drop_neutral: if ignore neutral label or not\n :param cut_off: cut off the word based on frequency. (remove word, which frequency < cut_off) If None, no cut off\n :param binary: binarize label or not\n :return dict: Stanford Sentiment Treebank data\n \"\"\"\n #df = sst_formatting(path)\n df= pd.read_csv(path, sep=\";\")\n df.columns=[\"Id\",\"data\",\"label\"]\n #label = quantize_label(df[\"label\"].values)\n #df[\"label\"] = label\n original_size = len(df)\n if cut_off is not None:\n df[\"cnt\"] = [len(i.split(' ')) for i in df[\"data\"].values]\n df = df[df.cnt >= cut_off]\n label = df[\"label\"].values\n if drop_neutral:\n df = df[df.label != 3]\n if binary:\n label = df[\"label\"].values\n label[label > 3] = 1\n label[label != 1] = 0\n df[\"label\"] = label\n bal = [np.sum(label == 1), np.sum(label == 0)]\n else:\n bal = [np.sum(label == i) for i in [1, 2, 4, 5]]\n else:\n bal = [np.sum(label == i) for i in [1, 2, 3, 4, 5]]\n return {\"Id\":df.Id.values ,\"label\": df.label.values, \"sentence\": df.data.values, \"original_size\": original_size, \"balance\": bal}\n\n\ndef data_set_default(path=\"./data/stanfordSentimentTreebank\", drop_neutral=True, cut_off=1, binary=False):\n \"\"\"\n :param str path: path to the `stanfordSentimentTreebank`\n :param bool drop_neutral: if ignore neutral label or not\n :param cut_off: cut off the word based on frequency. (remove word, which frequency < cut_off) If None, no cut off\n :param binary: binarize label or not\n :return dict: Stanford Sentiment Treebank data\n \"\"\"\n print(\"in\")\n df = sst_formatting(path)\n label = quantize_label(df[\"label\"].values)\n df[\"label\"] = label\n original_size = len(df)\n if cut_off is not None:\n df[\"cnt\"] = [len(i.split(' ')) for i in df[\"data\"].values]\n df = df[df.cnt >= cut_off]\n label = df[\"label\"].values\n if drop_neutral:\n df = df[df.label != 3]\n if binary:\n label = df[\"label\"].values\n label[label > 3] = 1\n label[label != 1] = 0\n df[\"label\"] = label\n bal = [np.sum(label == 1), np.sum(label == 0)]\n else:\n bal = [np.sum(label == i) for i in [1, 2, 4, 5]]\n else:\n bal = [np.sum(label == i) for i in [1, 2, 3, 4, 5]]\n return {\"label\": df.label.values, \"sentence\": df.data.values, \"original_size\": original_size, \"balance\": bal}\n\n\ndef sst_formatting(path):\n with open(\"%s/sentiment_labels.txt\" % path) as f:\n _tmp = [i.split('|') for i in f.read().split('\\n')]\n _tmp.pop(-1)\n _tmp.pop(0)\n _tmp = np.array(_tmp)\n _df1 = pd.DataFrame(_tmp[:, 1].astype(float), columns=[\"label\"], index=_tmp[:, 0].astype(int))\n\n with open(\"%s/dictionary.txt\" % path) as f:\n _tmp = [i.split('|') for i in f.read().split('\\n')]\n _tmp.pop(-1)\n _tmp = np.array(_tmp)\n _df2 = pd.DataFrame(_tmp[:, 0], columns=[\"data\"], index=_tmp[:, 1].astype(int))\n\n return _df1.join(_df2, how=\"inner\")\n\n\ndef quantize_label(score):\n \"\"\"\n [0, 0.2], (0.2, 0.4], (0.4, 0.6], (0.6, 0.8], (0.8, 1.0]\n for very negative, negative, neutral, positive, very positive, respectively.\n\n :param score:\n :return: very negative: 1, negative: 2, neutral: 3, positive: 4, very positive: 5\n \"\"\"\n label = np.zeros(len(score))\n label[score <= 0.2] += 1\n label[score <= 0.4] += 1\n label[score <= 0.6] += 1\n label[score <= 0.8] += 1\n label[score <= 1] += 1\n return 
label.astype(int)\n\n\nif __name__ == '__main__':\n data = sst_formatting(\"./data/stanfordSentimentTreebank\")\n length = []\n for _d in data[\"sentence\"]:\n length.append(len(_d.split(' ')))\n print(\"Word distribution\")\n print(\"max\", np.sort(length)[-100:])\n print(\"min\", np.sort(length)[:100])\n print(\"mean %0.2f\" % np.mean(length))\n print(\"median %0.2f\" % np.median(length))\n print(\"balance\", data[\"balance\"])\n print(\"size: %i -> %i\" % (data[\"original_size\"], len(data[\"label\"])))\n"
] | [
[
"pandas.read_csv",
"numpy.median",
"numpy.sort",
"numpy.mean",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
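The cumulative-threshold loop in quantize_label() above can be reproduced exactly with one np.digitize call; a small equivalence sketch (note that, as written, the function gives *low* scores the *high* labels, e.g. 0.1 -> 5 and 0.95 -> 1, which reads inverted relative to its docstring):

import numpy as np

def quantize_label_vectorized(score):
    # Each score gains one increment per threshold it falls at or below,
    # so np.digitize with right=True reproduces the original loop exactly.
    return (5 - np.digitize(score, [0.2, 0.4, 0.6, 0.8], right=True)).astype(int)

scores = np.array([0.1, 0.2, 0.35, 0.55, 0.75, 0.95])
print(quantize_label_vectorized(scores))  # [5 5 4 3 2 1]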
OpenMDAO/OpenMDAO | [
"5c53061decba3bcef63ed7fab73ed1fa52064355"
] | [
"openmdao/components/explicit_func_comp.py"
] | [
"\"\"\"Define the FuncComponent class.\"\"\"\n\ntry:\n import jax\n from jax import jvp, vjp, vmap, random, jit\n import jax.numpy as jnp\nexcept ImportError:\n jax = None\n\nimport re\nimport numpy as np\nfrom numpy import asarray, isscalar, imag, complex as npcomplex\nfrom itertools import product\nfrom openmdao.core.explicitcomponent import ExplicitComponent\nfrom openmdao.core.constants import INT_DTYPE\nfrom openmdao.utils.units import valid_units\nfrom openmdao.utils.om_warnings import issue_warning\nimport openmdao.func_api as omf\n\n\n# regex to check for variable names.\nnamecheck_rgx = re.compile('[_a-zA-Z][_a-zA-Z0-9]*')\n\n# Names that are not allowed for input or output variables (keywords for options)\n_disallowed_varnames = {'units', 'shape', 'shape_by_conn', 'run_root_only', 'distributed',\n 'assembled_jac_type'}\n\n_meta_keep = {'units', 'shape', 'val'}\n_from_def = {'default_units': 'units', 'default_shape': 'shape'}\n\n\ndef _check_units_option(option, value):\n if value is not None and not valid_units(value):\n raise ValueError(f\"The units '{value}' are invalid.\")\n\n\ndef _copy_with_ignore(dct, keepers, ignore=()):\n \"\"\"\n Copy the entries in the given dict whose keys are in keepers.\n\n Parameters\n ----------\n dct : dict\n The dictionary to be copied.\n keepers : set-like\n Set of keys for entries we want to keep.\n ignore : set or tuple\n Ignore these keys.\n\n Returns\n -------\n dict\n A new dict containing 'keepers' entries.\n \"\"\"\n kept = {}\n warn = set()\n for k, v in dct.items():\n if k in keepers:\n kept[k] = v\n elif k not in ignore:\n warn.add(k)\n\n if warn:\n issue_warning(f\"The following metadata entries were ignored: {sorted(warn)}.\")\n\n return kept\n\n\nclass ExplicitFuncComp(ExplicitComponent):\n \"\"\"\n A component that wraps a python function.\n\n Parameters\n ----------\n func : function\n The function to be wrapped by this Component.\n compute_partials : function or None\n If not None, call this function when computing partials.\n **kwargs : named args\n Args passed down to ExplicitComponent.\n\n Attributes\n ----------\n _func : callable\n The function wrapper used by this component.\n _compute_partials : function or None\n If not None, call this function when computing partials.\n \"\"\"\n\n def __init__(self, func, compute_partials=None, **kwargs):\n \"\"\"\n Initialize attributes.\n \"\"\"\n super().__init__(**kwargs)\n self._func = omf.wrap(func)\n self._compute_partials = compute_partials\n\n def setup(self):\n \"\"\"\n Define out inputs and outputs.\n \"\"\"\n optignore = {'is_option'}\n\n for name, meta in self._func.get_input_meta():\n self._check_var_name(name)\n if 'is_option' in meta and meta['is_option']:\n kwargs = _copy_with_ignore(meta, omf._allowed_declare_options_args,\n ignore=optignore)\n self.options.declare(name, **kwargs)\n else:\n kwargs = _copy_with_ignore(meta, omf._allowed_add_input_args)\n self.add_input(name, **kwargs)\n\n for i, (name, meta) in enumerate(self._func.get_output_meta()):\n if name is None:\n raise RuntimeError(f\"{self.msginfo}: Can't add output corresponding to return \"\n f\"value in position {i} because it has no name. 
Specify the \"\n \"name by returning a variable, for example 'return myvar', or \"\n \"include the name in the function's metadata.\")\n self._check_var_name(name)\n kwargs = _copy_with_ignore(meta, omf._allowed_add_output_args)\n self.add_output(name, **kwargs)\n\n def declare_partials(self, *args, **kwargs):\n \"\"\"\n Declare information about this component's subjacobians.\n\n Parameters\n ----------\n *args : list\n Positional args to be passed to base class version of declare_partials.\n **kwargs : dict\n Keyword args to be passed to base class version of declare_partials.\n\n Returns\n -------\n dict\n Metadata dict for the specified partial(s).\n \"\"\"\n if self._compute_partials is None and ('method' not in kwargs or\n kwargs['method'] == 'exact'):\n raise RuntimeError(f\"{self.msginfo}: declare_partials must be called with method equal \"\n \"to 'cs', 'fd', or 'jax'.\")\n\n return super().declare_partials(*args, **kwargs)\n\n def _setup_partials(self):\n \"\"\"\n Check that all partials are declared.\n \"\"\"\n kwargs = self._func.get_declare_coloring()\n if kwargs is not None:\n self.declare_coloring(**kwargs)\n\n for kwargs in self._func.get_declare_partials():\n self.declare_partials(**kwargs)\n\n super()._setup_partials()\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Compute sub-jacobian parts. The model is assumed to be in an unscaled state.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables read via inputs[key].\n partials : Jacobian\n Sub-jac components written to partials[output_name, input_name].\n \"\"\"\n if self._compute_partials is None:\n return\n\n args = list(inputs.values())\n args.append(partials)\n self._compute_partials(*args)\n\n def _check_var_name(self, name):\n match = namecheck_rgx.match(name)\n if match is None or match.group() != name:\n raise NameError(f\"{self.msginfo}: '{name}' is not a valid variable name.\")\n\n if name in _disallowed_varnames:\n raise NameError(f\"{self.msginfo}: cannot use variable name '{name}' because \"\n \"it's a reserved keyword.\")\n\n def _compute_output_array(self, input_values, output_array):\n \"\"\"\n Fill the given output array with our function result based on the given input values.\n\n Parameters\n ----------\n input_values : tuple of ndarrays or floats\n Unscaled, dimensional input variables.\n output_array\n The output array being filled.\n \"\"\"\n outs = self._func(*input_values)\n if isinstance(outs, tuple):\n start = end = 0\n for o in outs:\n a = asarray(o) if isscalar(o) else o\n end += a.size\n output_array[start:end] = a.flat\n start = end\n else:\n if isscalar(outs):\n output_array[:] = outs\n else:\n output_array[:] = outs.flat\n\n def compute(self, inputs, outputs):\n \"\"\"\n Compute the result of calling our function with the given inputs.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables.\n outputs : Vector\n Unscaled, dimensional output variables.\n \"\"\"\n # this will update the outputs array in place\n self._compute_output_array(inputs.values(), outputs.asarray())\n"
] | [
[
"numpy.asarray",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
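The _compute_output_array() method above packs a function's return values (scalars or arrays, possibly a tuple of them) into one contiguous output vector. A standalone numpy sketch of that packing, with illustrative names:

import numpy as np

def pack_outputs(outs, out_array):
    # Promote scalars to arrays and write each return value into one
    # contiguous slice of the flat output vector.
    if not isinstance(outs, tuple):
        outs = (outs,)
    start = 0
    for o in outs:
        a = np.asarray(o)
        out_array[start:start + a.size] = a.flat
        start += a.size

buf = np.empty(5)
pack_outputs((1.5, np.array([[2.0, 3.0], [4.0, 5.0]])), buf)
print(buf)  # [1.5 2. 3. 4. 5.]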
Zahlii/shap | [
"28010d77f40141aaf8c69534f9c0878448cd1e49"
] | [
"shap/explainers/tf_utils.py"
] | [
"tf = None\nimport warnings\n\ndef _import_tf():\n \"\"\" Tries to import tensorflow.\n \"\"\"\n global tf\n if tf is None:\n import tensorflow as tf\n\ndef _get_session(session):\n \"\"\" Common utility to get the session for the tensorflow-based explainer.\n\n Parameters\n ----------\n explainer : Explainer\n\n One of the tensorflow-based explainers.\n\n session : tf.compat.v1.Session\n\n An optional existing session.\n \"\"\"\n _import_tf()\n # if we are not given a session find a default session\n if session is None:\n try:\n session = tf.compat.v1.keras.backend.get_session()\n except:\n session = tf.keras.backend.get_session()\n return tf.get_default_session() if session is None else session\n\ndef _get_graph(explainer):\n \"\"\" Common utility to get the graph for the tensorflow-based explainer.\n\n Parameters\n ----------\n explainer : Explainer\n\n One of the tensorflow-based explainers.\n \"\"\"\n _import_tf()\n if not tf.executing_eagerly():\n return explainer.session.graph\n else:\n return getattr(explainer.model_output, \"graph\", None)\n\ndef _get_model_inputs(model):\n \"\"\" Common utility to determine the model inputs.\n\n Parameters\n ----------\n model : Tensorflow Keras model or tuple\n\n The tensorflow model or tuple.\n \"\"\"\n _import_tf()\n if str(type(model)).endswith(\"keras.engine.sequential.Sequential'>\") or \\\n str(type(model)).endswith(\"keras.models.Sequential'>\") or \\\n str(type(model)).endswith(\"keras.engine.training.Model'>\") or \\\n isinstance(model, tf.keras.Model):\n return model.inputs\n elif str(type(model)).endswith(\"tuple'>\"):\n return model[0]\n else:\n assert False, str(type(model)) + \" is not currently a supported model type!\"\n\ndef _get_model_output(model):\n \"\"\" Common utility to determine the model output.\n\n Parameters\n ----------\n model : Tensorflow Keras model or tuple\n\n The tensorflow model or tuple.\n \"\"\"\n _import_tf()\n if str(type(model)).endswith(\"keras.engine.sequential.Sequential'>\") or \\\n str(type(model)).endswith(\"keras.models.Sequential'>\") or \\\n str(type(model)).endswith(\"keras.engine.training.Model'>\") or \\\n isinstance(model, tf.keras.Model):\n if len(model.layers[-1]._inbound_nodes) == 0:\n if len(model.outputs) > 1:\n warnings.warn(\"Only one model output supported.\")\n return model.outputs[0]\n else:\n return model.layers[-1].output\n elif str(type(model)).endswith(\"tuple'>\"):\n return model[1]\n else:\n assert False, str(type(model)) + \" is not currently a supported model type!\"\n"
] | [
[
"tensorflow.get_default_session",
"tensorflow.keras.backend.get_session",
"tensorflow.compat.v1.keras.backend.get_session",
"tensorflow.executing_eagerly"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.7",
"2.5",
"2.8",
"2.10"
]
}
] |
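The model-type checks above rely on string-based duck typing so that specific Keras engine modules (which moved between TensorFlow releases) never have to be imported. A minimal sketch of the same test, assuming only public tf.keras APIs:

import tensorflow as tf

def looks_like_keras_model(model):
    # Checking the stringified class path avoids hard imports of
    # keras.engine.*; isinstance covers the common tf.keras case.
    type_name = str(type(model))
    return (type_name.endswith("keras.engine.sequential.Sequential'>")
            or type_name.endswith("keras.engine.training.Model'>")
            or isinstance(model, tf.keras.Model))

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
print(looks_like_keras_model(model))                           # True
print(looks_like_keras_model((model.inputs, model.outputs)))   # False: tuple path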
biosustain/croissance | [
"284d08499efa37dd52069c390e1a9612f2496531"
] | [
"croissance/formats/input.py"
] | [
"import pandas\n\n\nclass TSVReader:\n def __init__(self, filepath):\n self._filepath = filepath\n\n def read(self):\n with open(self._filepath, \"rt\") as handle:\n data = pandas.read_csv(handle, sep=\"\\t\", header=0, index_col=0)\n\n return [(name, data[name].dropna()) for name in data.columns]\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n pass\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
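A usage sketch for TSVReader above, with an in-memory TSV standing in for a real file (the reader itself expects a filepath); it shows how each column becomes a (name, series) pair with NaNs dropped per curve:

import io
import pandas

tsv = "time\tA\tB\n0\t0.1\t0.2\n1\t0.15\t\n2\t0.22\t0.31\n"
data = pandas.read_csv(io.StringIO(tsv), sep="\t", header=0, index_col=0)
curves = [(name, data[name].dropna()) for name in data.columns]
for name, series in curves:
    print(name, len(series))  # A 3, B 2 (B's missing value is dropped)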
enisbelgacem/montepython_public | [
"e829e8bb1185a166e6a02e013cf419469288947e"
] | [
"montepython/likelihoods/HLV_gauss_opt/__init__.py"
] | [
"import os\nimport numpy as np\nfrom montepython.likelihood_class import Likelihood\n\n\nclass HLV_gauss_opt(Likelihood):\n\n # initialization routine\n\n def __init__(self, path, data, command_line):\n\n Likelihood.__init__(self, path, data, command_line)\n\n # define array for values of z and data points\n self.z = np.array([], 'float64')\n self.measdL = np.array([], 'float64')\n self.errdL = np.array([], 'float64')\n\n # read redshifts and data points\n for line in open(os.path.join(\n self.data_directory, self.data_file), 'r'):\n if (line.find('#') == -1):\n self.z = np.append(self.z, float(line.split()[0]))\n self.measdL = np.append(self.measdL, float(line.split()[1]))\n self.errdL = np.append(self.errdL, float(line.split()[3]))\n\n # number of data points\n self.num_points = np.shape(self.z)[0]\n\n # end of initialization\n\n # compute likelihood\n\n def loglkl(self, cosmo, data):\n\n chi2 = 0.\n\n # for each point infer\n # theoretical prediction and difference with observation\n\n for i in range(self.num_points):\n\n theodL = cosmo.luminosity_distance_gw(self.z[i])\n\n chi2 += ((theodL -self.measdL[i]) / (self.errdL[i])) ** 2\n\n # return ln(L)\n lkl = - 0.5 * chi2\n\n return lkl\n"
] | [
[
"numpy.array",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
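The per-point loop in loglkl() above is a plain Gaussian chi-square; it collapses to one vectorized expression. A numpy sketch with synthetic stand-in numbers (the theoretical distances would come from cosmo.luminosity_distance_gw):

import numpy as np

z = np.array([0.1, 0.3, 0.5])
meas_dl = np.array([450.0, 1500.0, 2800.0])    # measured luminosity distances
err_dl = np.array([20.0, 60.0, 110.0])         # their uncertainties
theo_dl = np.array([460.0, 1480.0, 2850.0])    # model predictions (stand-in)

chi2 = np.sum(((theo_dl - meas_dl) / err_dl) ** 2)
lkl = -0.5 * chi2
print(lkl)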
km1562/detectron2 | [
"b69418fcde39f84f6900d77a2c9649503b845ed7"
] | [
"tests/test_export_caffe2.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n# -*- coding: utf-8 -*-\n\nimport copy\nimport os\nimport tempfile\nimport unittest\nimport torch\n\nfrom detectron2 import model_zoo\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.utils.testing import get_sample_coco_image\n\n\[email protected](os.environ.get(\"CI\"), \"Require COCO datas and model zoo.\")\nclass TestCaffe2Export(unittest.TestCase):\n def setUp(self):\n setup_logger()\n\n def _test_model(self, config_path, device=\"cpu\"):\n # requires extra dependencies\n from detectron2.export import Caffe2Model, add_export_config, Caffe2Tracer\n\n cfg = model_zoo.get_config(config_path)\n add_export_config(cfg)\n cfg.MODEL.DEVICE = device\n model = model_zoo.get(config_path, trained=True, device=device)\n\n inputs = [{\"image\": get_sample_coco_image()}]\n tracer = Caffe2Tracer(cfg, model, copy.deepcopy(inputs))\n\n c2_model = tracer.export_caffe2()\n\n with tempfile.TemporaryDirectory(prefix=\"detectron2_unittest\") as d:\n c2_model.save_protobuf(d)\n c2_model.save_graph(os.path.join(d, \"test.svg\"), inputs=copy.deepcopy(inputs))\n\n c2_model = Caffe2Model.load_protobuf(d)\n c2_model(inputs)[0][\"instances\"]\n\n ts_model = tracer.export_torchscript()\n ts_model.save(os.path.join(d, \"model.ts\"))\n\n def testMaskRCNN(self):\n # TODO: this test requires manifold access, see: T88318502\n self._test_model(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\n\n @unittest.skipIf(not torch.cuda.is_available(), \"CUDA not available\")\n def testMaskRCNNGPU(self):\n # TODO: this test requires manifold access, see: T88318502\n self._test_model(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\", device=\"cuda\")\n\n def testRetinaNet(self):\n # TODO: this test requires manifold access, see: T88318502\n self._test_model(\"COCO-Detection/retinanet_R_50_FPN_3x.yaml\")\n\n def testPanopticFPN(self):\n # TODO: this test requires manifold access, see: T88318502\n self._test_model(\"COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml\")\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
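The test class above gates itself on the environment twice: the whole TestCase is skipped when CI is set, and GPU tests are skipped when CUDA is absent. A minimal illustration of that pattern, with illustrative names:

import os
import unittest

@unittest.skipIf(os.environ.get("CI"), "Requires local test data.")
class TestHeavyExport(unittest.TestCase):
    def test_cpu_path(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()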
dapengchen123/code_v1 | [
"101dd339d9225e740962974754a7bb1b869b9e72"
] | [
"reid/models/resnetlstm_btfu.py"
] | [
"from __future__ import absolute_import\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\nfrom torch import nn\nfrom torch.autograd import Variable\n\nfrom torchvision.models import resnet18, resnet34, resnet50, resnet101, \\\n resnet152\n\nfrom torch import zeros as t_zeros\n\nLSTM_hidden = 128\nLSTM_layer = 2\n\n\ndef init_states(hidden_sz, batch_sz, layer=1):\n h_0 = Variable(t_zeros(layer, batch_sz, hidden_sz))\n c_0 = Variable(t_zeros(layer, batch_sz, hidden_sz))\n return (h_0.cuda(), c_0.cuda())\n\n\nclass ResNetLSTM_btfu(nn.Module):\n __factory = {\n 18: resnet18,\n 34: resnet34,\n 50: resnet50,\n 101: resnet101,\n 152: resnet152,\n }\n\n\n\n def __init__(self, depth, pretrained=True, num_features=0, norm=False, dropout=0):\n super(ResNetLSTM_btfu, self).__init__()\n\n self.depth = depth\n self.pretrained = pretrained\n\n # Construct base (pretrained) resnet\n if depth not in ResNetLSTM_btfu.__factory:\n raise KeyError(\"Unsupported depth:\". depth)\n\n\n ### At the bottom of CNN network\n conv0 = nn.Conv2d(2, 64, kernel_size=7, stride=2, padding=3, bias=False)\n init.kaiming_normal(conv0.weight, mode='fan_out')\n self.conv0 = conv0\n self.base = ResNetLSTM_btfu.__factory[depth](pretrained=pretrained)\n\n self.num_features = num_features\n self.norm = norm\n self.dropout = dropout\n self.has_embedding = num_features > 0\n\n\n ### Append new upper layers\n out_planes = self.base.fc.in_features\n\n if self.has_embedding:\n self.feat = nn.Linear(out_planes, self.num_features)\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n init.kaiming_normal(self.feat.weight, mode='fan_out')\n init.constant(self.feat.bias, 0)\n init.constant(self.feat_bn.weight, 1)\n init.constant(self.feat_bn.bias, 0)\n else:\n self.num_features = out_planes\n\n if self.dropout > 0:\n self.drop = nn.Dropout(self.dropout)\n\n ### Append LSTM layers\n self.lstm = nn.LSTM(self.num_features, LSTM_hidden, num_layers=LSTM_layer, batch_first=True)\n\n\n def forward(self, imgs, motions):\n\n img_size = imgs.size()\n motion_size = motions.size()\n\n batch_sz = img_size[0]\n seq_len = img_size[1]\n\n\n imgs = imgs.view(img_size[0] * img_size[1], img_size[2], img_size[3], img_size[4])\n motions = motions.view(motion_size[0] * motion_size[1], motion_size[2], motion_size[3], motion_size[4])\n motions = motions[:, 1:3]\n for name, module in self.base._modules.items():\n if name == 'conv1':\n x = module(imgs)+self.conv0(motions)\n continue\n\n if name == 'avgpool':\n break\n\n x = module(x)\n\n x = F.avg_pool2d(x, x.size()[2:])\n x = x.view(x.size(0), -1)\n if self.has_embedding:\n x = self.feat(x)\n x = self.feat_bn(x)\n\n if self.norm:\n x = x / x.norm(2, 1).expand_as(x)\n elif self.has_embedding:\n x = F.relu(x)\n\n if self.dropout > 0:\n x = self.drop(x)\n\n ###### LSTM part #########\n x = x.view(batch_sz, seq_len, -1)\n\n hidden0 = init_states(LSTM_hidden, batch_sz, LSTM_layer)\n output, _ = self.lstm (x, hidden0)\n return output[:, -1]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.nn.init.kaiming_normal",
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.init.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
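A CPU-friendly sketch of the init_states()/LSTM tail above. torch.autograd.Variable is a no-op wrapper in modern PyTorch, so plain tensors suffice; sizes are illustrative:

import torch
import torch.nn as nn

LSTM_HIDDEN, LSTM_LAYER = 128, 2
batch_sz, seq_len, feat = 4, 8, 256

lstm = nn.LSTM(feat, LSTM_HIDDEN, num_layers=LSTM_LAYER, batch_first=True)
h0 = torch.zeros(LSTM_LAYER, batch_sz, LSTM_HIDDEN)  # initial hidden state
c0 = torch.zeros(LSTM_LAYER, batch_sz, LSTM_HIDDEN)  # initial cell state

x = torch.randn(batch_sz, seq_len, feat)
output, _ = lstm(x, (h0, c0))
last_step = output[:, -1]  # the per-sequence embedding returned by forward()
print(last_step.shape)     # torch.Size([4, 128])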
carlosejimenez/leaf | [
"a9507358358bbf12829183cf368bc48586121b99"
] | [
"models/graph.py"
] | [
"import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nINFILE = './my_metrics/test-accuracy.csv'\nTITLE = 'Testing Accuracy for Baseline'\nX_LABEL = 'round'\nY_LABEL = 'accuracy'\n\nif __name__ == '__main__':\n\n sns.set(style='darkgrid')\n\n data = pd.read_csv(INFILE, header=0)\n\n # Plot the responses for different events and regions\n plt.figure()\n ax = sns.lineplot(x=X_LABEL, y=Y_LABEL, data=data).set_title(TITLE)\n plt.show()\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
marinakiseleva/NADE | [
"69ea890fad1e775dfff2db6dad4dd9d11b6a343e"
] | [
"deepnade/buml/Model/Model.py"
] | [
"import numpy as np\nimport theano\nimport theano.tensor as T\nfrom Utils.theano_helpers import floatX\n\n\nclass Model(object):\n\n def __init__(self):\n self.parameters = dict()\n self.parameters_to_optimise = list()\n self.parameters_to_regularise = list()\n\n def add_parameter(self, parameter, optimise=False, regularise=False):\n \"\"\"\n Adds a parameter to the model\n \"\"\"\n try:\n self.__getattribute__(\"parameters\")\n except AttributeError:\n self.parameters = dict()\n # This should actually be a property of the trainingmethod, not of the\n # model, but I'll do it like this now\n self.parameters_to_optimise = list()\n self.parameters_to_regularise = list()\n self.parameters[parameter.name] = parameter\n if optimise:\n self.parameters_to_optimise.append(parameter.name)\n if regularise:\n self.parameters_to_regularise.append(parameter.name)\n parameter.add_to_model(self)\n\n def get_parameter(self, param_id):\n return self.__getattribute__(param_id)\n\n def get_parameters(self):\n \"\"\"\n Returns a hash with an entry per parameter, where the entry key is the parameter name and its value is a serialization of its value\n suitable for storing in an HDF5 file\n \"\"\"\n params = dict([(name, parameter.get_value())\n for name, parameter in self.parameters.iteritems()])\n params[\"__class__\"] = self.__class__.__name__\n return params\n\n def set_parameters(self, params):\n \"\"\"\n Receives a hash of the kind returned by get_parameters and updates the object's parameters with those values\n \"\"\"\n for name in self.parameters.keys():\n self.parameters[name].set_value(params[name])\n\n def get_parameters_to_optimise(self):\n return self.parameters_to_optimise\n\n def get_parameters_to_regularise(self):\n return self.parameters_to_regularise\n\n def k_like_parameters_to_optimise(self, k, name):\n v = dict()\n for param in self.get_parameters_to_optimise():\n param_v = self.get_parameter(param).get_value()\n param_type = param_v.dtype\n v[param] = theano.shared(np.zeros_like(\n param_v, dtype=floatX) + np.asarray([k], dtype=param_type), name + \"_\" + str(param))\n return v\n\n def finite_diff_gradients(self, f, delta=1e-6):\n \"\"\"\n f is called without parameters, the changes in the parameters happen as a side effect\n \"\"\"\n gradients = dict()\n fx = f()\n for p in self.parameters_to_optimise:\n original = self.parameters[p].get_value()\n grad = np.zeros_like(original)\n if np.prod(original.shape) > 1:\n for index, _ in np.ndenumerate(original):\n xh = original.copy()\n xh[index] += delta\n self.parameters[p].set_value(xh)\n grad[index] = (f() - fx) / delta\n self.parameters[p].set_value(original)\n else:\n xh = original.copy()\n xh += delta\n self.parameters[p].set_value(xh)\n grad = (f() - fx) / delta\n self.parameters[p].set_value(original)\n gradients[p] = grad\n return gradients\n\n\nclass CompositeModel(Model):\n\n def __init__(self):\n super(CompositeModel, self).__init__()\n self.models = dict()\n\n def add_model(self, name, model):\n self.models[name] = model\n\n def get_models(self):\n return self.models\n\n def get_parameter(self, param_id):\n if isinstance(param_id, tuple):\n return self.models[param_id[0]].__getattribute__(param_id[1])\n else:\n return self.__getattribute__(param_id)\n\n def get_parameters(self):\n \"\"\"\n Returns a hash with an entry per parameter and submodel (recursively), where the entry key is the parameter name and its value is a serialization of its value\n suitable for storing in an HDF5 file\n \"\"\"\n params = dict([(name, 
parameter.get_value())\n                       for name, parameter in self.parameters.iteritems()])\n        params[\"__class__\"] = self.__class__.__name__\n        for k, m in self.models.iteritems():\n            params[k] = m.get_parameters()\n        return params\n\n    def set_parameters(self, params):\n        \"\"\"\n        Receives a hash of the kind returned by get_parameters and updates the object's and submodel's parameters with those values\n        \"\"\"\n        for k, v in params.iteritems():\n            if k == \"__class__\":\n                continue\n            if isinstance(v, dict):\n                self.models[k].set_parameters(v)\n            else:\n                self.parameters[k].set_value(v)\n\n    def get_parameters_to_optimise(self):\n        p = list()\n        p += self.parameters_to_optimise\n        for name, model in self.models.iteritems():\n            p += [(name, param) for param in model.get_parameters_to_optimise()]\n        return p\n\n    def get_parameters_to_regularise(self):\n        p = list()\n        p += self.parameters_to_regularise\n        for name, model in self.models.iteritems():\n            p += [(name, param) for param in model.get_parameters_to_regularise()]\n        return p\n\n\nclass Parameter(object):\n\n    def __init__(self):\n        pass\n\n    def add_to_model(self, model):\n        pass\n\n    def set_value(self, value):\n        pass\n\n    def get_value(self):\n        pass\n\n\nclass TensorParameter(Parameter):\n\n    def __init__(self, name, shape, theano=True, theano_type=floatX):\n        self.name = name\n        self.shape = shape\n        self.theano = theano\n        self.theano_type = theano_type\n\n    def add_to_model(self, model):\n        self.model = model\n        if self.theano:\n            setattr(model, self.name, theano.shared(\n                np.zeros(self.shape, dtype=self.theano_type), self.name))\n        else:\n            setattr(model, self.name, np.zeros(self.shape, dtype=self.theano_type))\n\n    def set_value(self, value):\n        if self.theano:\n            self.model.__getattribute__(self.name).set_value(\n                np.asarray(value).astype(self.theano_type))\n        else:\n            setattr(self.model, self.name, value)\n\n    def get_value(self):\n        if self.theano:\n            return self.model.__getattribute__(self.name).get_value()\n        else:\n            return self.model.__getattribute__(self.name)\n\n\nclass ScalarParameter(Parameter):\n\n    def __init__(self, name, default_value, theano=True, theano_type=floatX):\n        self.name = name\n        self.default_value = default_value\n        self.theano = theano\n        self.theano_type = theano_type\n\n    def add_to_model(self, model):\n        self.model = model\n        if self.theano:\n            setattr(model, self.name, theano.shared(\n                np.array(self.default_value, dtype=self.theano_type), self.name))\n        else:\n            setattr(model, self.name, np.array(\n                self.default_value, dtype=self.theano_type))\n\n    def set_value(self, value):\n        if self.theano:\n            self.model.__getattribute__(self.name).set_value(\n                np.asarray(value).astype(floatX))\n        else:\n            setattr(self.model, self.name, value)\n\n    def get_value(self):\n        if self.theano:\n            return self.model.__getattribute__(self.name).get_value()\n        else:\n            return self.model.__getattribute__(self.name)\n\n\nclass SizeParameter(Parameter):\n\n    def __init__(self, name):\n        self.name = name\n\n    def add_to_model(self, model):\n        self.model = model\n        setattr(model, self.name, 0)\n\n    def set_value(self, value):\n        setattr(self.model, self.name, value)\n\n    def get_value(self):\n        return self.model.__getattribute__(self.name)\n\n\nclass NonLinearityParameter(Parameter):\n\n    def __init__(self, name):\n        self.name = name\n        self.options = {\"tanh\": [T.tanh, np.tanh],\n                        \"sigmoid\": [T.nnet.sigmoid, lambda x: 1.0 / (1.0 + np.exp(-x))],\n                        \"RLU\": [lambda x: x * (x > 0), lambda x: x * (x > 0)],\n                        \"softsign\": [lambda x: x / (1 + T.abs_(x)), lambda x: x / (1 + np.abs(x))],\n                        \"exponential\": [T.exp, np.exp]}\n\n    def 
add_to_model(self, model):\n self.model = model\n self.value = self.options.items()[0][0]\n setattr(model, self.name, self.options.items()[0][1][0])\n\n def set_value(self, value):\n self.value = value\n setattr(self.model, self.name, self.options[value][0])\n\n def get_numpy_f(self):\n return self.options[self.value][1]\n\n def get_value(self):\n return self.value\n\n def get_name(self):\n return self.value\n"
] | [
[
"numpy.abs",
"numpy.asarray",
"numpy.zeros_like",
"numpy.prod",
"numpy.exp",
"numpy.ndenumerate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
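The finite_diff_gradients() method above checks analytic gradients by perturbing one parameter entry at a time. A standalone numpy sketch of the same idea on a simple quadratic (names illustrative):

import numpy as np

def finite_diff(f, x, delta=1e-6):
    # Perturb one coordinate at a time and compare (f(x+d) - f(x)) / d
    # against the analytic gradient.
    fx = f(x)
    grad = np.zeros_like(x)
    for idx, _ in np.ndenumerate(x):
        xh = x.copy()
        xh[idx] += delta
        grad[idx] = (f(xh) - fx) / delta
    return grad

x = np.array([1.0, -2.0, 0.5])
f = lambda v: np.sum(v ** 2)
print(finite_diff(f, x))  # ~[ 2. -4.  1.], matching the analytic gradient 2x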
rampasek/SAN | [
"6e38329957afa6f5d5d1ce911f7725ca6f385ca4"
] | [
"nets/molhiv_graph_regression/SAN.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nimport dgl\n\nfrom ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder\n\n\"\"\"\n Graph Transformer with edge features\n \n\"\"\"\nfrom layers.graph_transformer_layer import GraphTransformerLayer\nfrom layers.mlp_readout_layer import MLPReadout\n\nclass SAN(nn.Module):\n def __init__(self, net_params):\n super().__init__()\n \n full_graph = net_params['full_graph']\n gamma = net_params['gamma']\n \n GT_layers = net_params['GT_layers']\n GT_hidden_dim = net_params['GT_hidden_dim']\n GT_out_dim = net_params['GT_out_dim']\n GT_n_heads = net_params['GT_n_heads']\n \n self.residual = net_params['residual']\n self.readout = net_params['readout']\n in_feat_dropout = net_params['in_feat_dropout']\n dropout = net_params['dropout']\n\n self.readout = net_params['readout']\n self.layer_norm = net_params['layer_norm']\n self.batch_norm = net_params['batch_norm']\n\n self.device = net_params['device']\n self.in_feat_dropout = nn.Dropout(in_feat_dropout)\n \n self.embedding_h = AtomEncoder(emb_dim = GT_hidden_dim)\n self.embedding_e = BondEncoder(emb_dim = GT_hidden_dim)\n\n \n self.layers = nn.ModuleList([ GraphTransformerLayer(gamma, GT_hidden_dim, GT_hidden_dim, GT_n_heads, full_graph, dropout, self.layer_norm, self.batch_norm, self.residual) for _ in range(GT_layers-1) ]) \n \n self.layers.append(GraphTransformerLayer(gamma, GT_hidden_dim, GT_out_dim, GT_n_heads, full_graph, dropout, self.layer_norm, self.batch_norm, self.residual))\n self.MLP_layer = MLPReadout(GT_out_dim, 1) # out dim for probability \n \n \n def forward(self, g, h, e):\n \n # input embedding\n h = self.embedding_h(h)\n h = self.in_feat_dropout(h)\n e = self.embedding_e(e) \n \n # Second Transformer\n for conv in self.layers:\n h, e = conv(g, h, e)\n g.ndata['h'] = h\n \n if self.readout == \"sum\":\n hg = dgl.sum_nodes(g, 'h')\n elif self.readout == \"max\":\n hg = dgl.max_nodes(g, 'h')\n elif self.readout == \"mean\":\n hg = dgl.mean_nodes(g, 'h')\n else:\n hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes\n \n sig = nn.Sigmoid()\n \n return sig(self.MLP_layer(hg))\n \n def loss(self, scores, targets):\n \n loss = nn.BCELoss()\n \n l = loss(scores.float(), targets.float())\n \n return l\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.BCELoss",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
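A toy-graph sketch of the readout dispatch in forward() above; it assumes a working dgl install and uses a random 3-node graph in place of a molecular batch:

import torch
import dgl

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])))
g.ndata['h'] = torch.randn(3, 8)  # stand-in for transformer node features

readout = "mean"
if readout == "sum":
    hg = dgl.sum_nodes(g, 'h')
elif readout == "max":
    hg = dgl.max_nodes(g, 'h')
else:
    hg = dgl.mean_nodes(g, 'h')  # default readout is mean nodes
print(hg.shape)  # torch.Size([1, 8]): one pooled vector per graph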
adeandrade/bayesian-optimizer | [
"30427943d69130179f7ccb32f63a08a1c57462f8"
] | [
"bayesian_optimizer.py"
] | [
"import warnings\nfrom typing import Sequence, Tuple\n\nimport numpy as np\nimport scipy.optimize as sp_optimize\nimport scipy.special as sp_special\n\nfrom .gaussian_process import GaussianProcess\nfrom .kernel import Kernel\nfrom .sobol import new_sobol_sequence_generator\n\n\nclass BayesianOptimizer:\n def __init__(self, kernel: Kernel, sigma: float, input_bounds: Sequence[Tuple[float, float]]):\n \"\"\"\n\n :param kernel:\n :param sigma:\n :param input_bounds:\n \"\"\"\n self.kernel = kernel\n self.sigma = sigma\n self.input_bounds = np.array(input_bounds)\n self.sequence_generator = new_sobol_sequence_generator()\n\n def calculate_expected_improvement(self, candidates: np.ndarray, inputs: np.ndarray, targets: np.ndarray) -> np.ndarray:\n \"\"\"\n\n :param candidates:\n :param inputs:\n :param targets:\n :return:\n \"\"\"\n means, covariances = GaussianProcess.predict(candidates, self.kernel, self.sigma, inputs, targets)\n\n best = np.min(targets, axis=0)\n\n standard_deviations = np.sqrt(np.diag(covariances))[:, np.newaxis]\n\n z_scores = (best - means) / standard_deviations\n\n cdf = 0.5 * (1.0 + sp_special.erf(z_scores / np.sqrt(2)))\n\n pdf = np.exp(-0.5 * z_scores ** 2) / np.sqrt(2 * np.pi)\n\n ei = (best - means) * cdf + standard_deviations * pdf\n\n ei_target_sum = np.sum(ei, axis=-1)\n\n return ei_target_sum\n\n def calculate_expected_improvement_input_gradients(self, candidates: np.ndarray, inputs: np.ndarray, targets: np.ndarray):\n \"\"\"\n\n :param candidates:\n :param inputs:\n :param targets:\n :return:\n \"\"\"\n means, covariances = GaussianProcess.predict(candidates, self.kernel, self.sigma, inputs, targets)\n\n mean_gradients, covariance_gradients = GaussianProcess.calculate_input_gradients(candidates, self.kernel, self.sigma, inputs, targets)\n\n best = np.min(targets, axis=0)\n\n standard_deviations = np.sqrt(np.diag(covariances))[:, np.newaxis]\n\n z_scores = ((best - means) / standard_deviations)\n\n cdf = 0.5 * (1.0 + sp_special.erf(z_scores / np.sqrt(2)))\n\n pdf = np.exp(-0.5 * z_scores ** 2) / np.sqrt(2 * np.pi)\n\n variance_gradients = np.diagonal(covariance_gradients).T\n\n gradients = -1. 
* mean_gradients * cdf[:, np.newaxis, :]\n gradients += 0.5 * pdf[:, np.newaxis, :] * variance_gradients[:, np.newaxis, :] / standard_deviations[:, np.newaxis, :]\n\n gradients_target_sum = np.sum(gradients, axis=1)\n\n return gradients_target_sum\n\n def generate_candidates(self, num_features: int, num_points: int) -> np.ndarray:\n \"\"\"\n Get candidates with `num_points` for `num_features`.\n Uses Sobol sequences to generate properly distributed points.\n Sobol sequences lie in the unit hypercube so we scale them using bound configuration.\n :param num_features:\n :param num_points:\n :return:\n \"\"\"\n points = self.sequence_generator.generate_sequence(num_features, num_points)\n\n scaled_points = points * (self.input_bounds[1] - self.input_bounds[0]) - self.input_bounds[0]\n\n return scaled_points\n\n def optimize_candidate(self, initial_candidate: np.ndarray, inputs: np.ndarray, targets: np.ndarray) -> np.ndarray:\n \"\"\"\n\n :param initial_candidate:\n :param inputs:\n :param targets:\n :return:\n \"\"\"\n assert initial_candidate.ndim == 1\n\n def objective_function(candidate):\n expected_improvement = self.calculate_expected_improvement(candidate[np.newaxis, :], inputs, targets)\n\n gradients = self.calculate_expected_improvement_input_gradients(candidate[np.newaxis, :], inputs, targets)\n\n return -expected_improvement, -np.squeeze(gradients, axis=0)\n\n optimized_candidate, _, info = sp_optimize.fmin_l_bfgs_b(\n objective_function,\n initial_candidate,\n bounds=self.input_bounds)\n\n if info['warnflag'] != 0:\n warnings.warn(f'fmin_l_bfgs_b terminated abnormally with the state: {info}')\n\n return optimized_candidate\n\n def suggest(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n num_candidates: int = 1e4,\n num_optimized_candidates: int = 20) -> np.ndarray:\n \"\"\"\n\n :param inputs:\n :param targets:\n :param num_candidates:\n :param num_optimized_candidates:\n :return:\n \"\"\"\n _, num_features = np.shape(inputs)\n\n initial_candidates = self.generate_candidates(num_features, num_candidates)\n\n initial_acquisitions = self.calculate_expected_improvement(initial_candidates, inputs, targets)\n\n best_candidates = initial_candidates[np.argsort(initial_acquisitions)[-num_optimized_candidates:]]\n\n optimized_candidates = [self.optimize_candidate(candidate, inputs, targets) for candidate in best_candidates]\n\n candidates = np.concatenate([best_candidates, np.stack(optimized_candidates, axis=0)], axis=0)\n\n acquisitions = self.calculate_expected_improvement(candidates, inputs, targets)\n\n suggestion = candidates[np.argmax(acquisitions)]\n\n return suggestion\n"
] | [
[
"numpy.diag",
"numpy.sqrt",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.min",
"numpy.squeeze",
"numpy.stack",
"numpy.argmax",
"numpy.shape",
"numpy.exp",
"numpy.argsort",
"numpy.array",
"numpy.diagonal",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
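The acquisition function above is the closed-form expected improvement for a minimization problem, EI = (best - mu) * Phi(z) + sigma * phi(z) with z = (best - mu) / sigma. A numpy sketch of that formula, with synthetic values standing in for GaussianProcess.predict() output:

import numpy as np
from scipy import special

mu = np.array([1.2, 0.8, 1.0])   # posterior means at three candidates
sd = np.array([0.3, 0.1, 0.5])   # posterior standard deviations
best = 0.9                       # incumbent (minimum observed target)

z = (best - mu) / sd
cdf = 0.5 * (1.0 + special.erf(z / np.sqrt(2)))        # Phi(z)
pdf = np.exp(-0.5 * z ** 2) / np.sqrt(2 * np.pi)       # phi(z)
ei = (best - mu) * cdf + sd * pdf
print(ei)  # largest where the posterior is both promising and uncertain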
oruebel/ndx-icephys-meta | [
"c97ea4f0ff60ad05e173cca30b0c46b809727f89"
] | [
"src/pynwb/ndx_icephys_meta/icephys.py"
] | [
"from pynwb import register_class\nfrom pynwb.file import NWBFile\nfrom pynwb.icephys import IntracellularElectrode, PatchClampSeries\nfrom pynwb.base import TimeSeries\nimport numpy as np\ntry:\n from pynwb.core import DynamicTable, DynamicTableRegion, VectorIndex, VectorData # pragma: no cover\nexcept ImportError: # pragma: no cover\n from hdmf.common import DynamicTable, DynamicTableRegion, VectorIndex # pragma: no cover\nfrom hdmf.utils import docval, popargs, getargs, call_docval_func, get_docval, fmt_docval_args\nimport warnings\nimport pandas as pd\nfrom collections import OrderedDict\nfrom copy import copy\n\nnamespace = 'ndx-icephys-meta'\n\n\nclass HierarchicalDynamicTableMixin:\n \"\"\"\n Mixin class for defining specialized functionality for hierarchical dynamic tables.\n\n Assumptions:\n\n 1) The current implementation assumes that there is only one DynamicTableRegion column\n that needs to be expanded as part of the hierarchy. Allowing multiple hierarchical\n columns in a single table get tricky, because it is unclear how those rows should\n be joined. To clarify, allowing multiple DynamicTableRegion should be fine, as long\n as only one of them should be expanded as part of the hierarchy.\n\n 2) The default implementation of the get_hierarchy_column_name function assumes that\n the first DynamicTableRegion that references a DynamicTable that inherits from\n HierarchicalDynamicTableMixin is the one that should be expanded as part of the\n hierarchy of tables. If there is no such column, then the default implementation\n assumes that the first DynamicTableRegion column is the one that needs to be expanded.\n These assumption of get_hierarchy_column_name can be easily fixed by overwriting\n the function in the subclass to return the name of the approbritate column.\n \"\"\"\n\n def get_hierarchy_column_name(self):\n \"\"\"\n Get the name of column that references another DynamicTable that\n is itself a HierarchicalDynamicTableMixin table.\n\n :returns: String with the column name or None\n \"\"\"\n first_col = None\n for col_index, col in enumerate(self.columns):\n if isinstance(col, DynamicTableRegion):\n first_col = col.name\n if isinstance(col.table, HierarchicalDynamicTableMixin):\n return col.name\n return first_col\n\n def get_referencing_column_names(self):\n \"\"\"\n Determine the names of all columns that reference another table, i.e.,\n find all DynamicTableRegion type columns\n\n Returns: List of strings with the column names\n \"\"\"\n col_names = []\n for col_index, col in enumerate(self.columns):\n if isinstance(col, DynamicTableRegion):\n col_names.append(col.name)\n return col_names\n\n def get_targets(self, include_self=False):\n \"\"\"\n Get a list of the full table hierarchy, i.e., recursively list all\n tables referenced in the hierarchy.\n\n Returns: List of DynamicTable objects\n\n \"\"\"\n hcol_name = self.get_hierarchy_column_name()\n hcol = self[hcol_name]\n hcol_target = hcol.table if isinstance(hcol, DynamicTableRegion) else hcol.target.table\n if isinstance(hcol_target, HierarchicalDynamicTableMixin):\n re = [self, ] if include_self else []\n re += [hcol_target, ]\n re += hcol_target.get_targets()\n return re\n else:\n return [hcol_target, ]\n\n def to_denormalized_dataframe(self, flat_column_index=False):\n \"\"\"\n Shorthand for 'self.to_hierarchical_dataframe().reset_index()'\n\n The function denormalizes the hierarchical table and represents all data as\n columns in the resulting dataframe.\n \"\"\"\n hier_df = 
self.to_hierarchical_dataframe(flat_column_index=True)\n flat_df = hier_df.reset_index()\n if not flat_column_index:\n
 # cn[0] is the level, cn[1:] is the label. If cn has only 2 elements then use cn[1] instead to\n # avoid creating column labels that are tuples with just one element\n
 mi_tuples = [(cn[0], cn[1:] if len(cn) > 2 else cn[1])\n for cn in flat_df.columns]\n
 flat_df.columns = pd.MultiIndex.from_tuples(mi_tuples, names=('source_table', 'label'))\n\n return flat_df\n\n
 def to_hierarchical_dataframe(self, flat_column_index=False):\n \"\"\"\n Create a Pandas dataframe with a hierarchical MultiIndex index that represents the\n hierarchical dynamic table.\n \"\"\"\n
 # Get the references column\n hcol_name = self.get_hierarchy_column_name()\n hcol = self[hcol_name]\n
 hcol_target = hcol.table if isinstance(hcol, DynamicTableRegion) else hcol.target.table\n\n
 # Create the data variables we need to collect the data for our output dataframe and associated index\n
 index = []\n data = []\n columns = None\n index_names = None\n\n
 # If we have indexed columns (other than our hierarchical column) then our index data for our\n # MultiIndex will contain lists as elements (which are not hashable) and as such create an error.\n # As such we need to check if we have any affected columns so we can fix our data\n
 indexed_column_indices = np.where([isinstance(self[colname], VectorIndex)\n for colname in self.colnames if colname != hcol_name])[0]\n
 indexed_column_indices += 1 # Need to increment by 1 since we add the row id in our iteration below\n\n
 # Case 1: Our DynamicTableRegion column points to a regular DynamicTable\n # If this is the case then we need to de-normalize the data and flatten the hierarchy\n
 if not isinstance(hcol_target, HierarchicalDynamicTableMixin):\n
 # 1) Iterate over all rows in our hierarchical column (i.e., the DynamicTableRegion column)\n
 for row_index, row_df in enumerate(hcol[:]): # need hcol[:] here in case this is an h5py.Dataset\n
 # 1.1): Since hcol is a DynamicTableRegion, each row returns another DynamicTable so we\n # next need to iterate over all rows in that table to denormalize our data\n
 for row in row_df.itertuples(index=True):\n
 # 1.1.1) Determine the column data for our row. Each selected row from our target table\n # becomes a row in our flattened table\n
 data.append(row)\n
 # 1.1.2) Determine the multi-index tuple for our row, consisting of: i) id of the row in this\n # table, ii) all columns (except the hierarchical column we are flattening), and\n # iii) the index (i.e., id) from our target row\n
 index_data = ([self.id[row_index], ] +\n [self[row_index, colname] for colname in self.colnames if colname != hcol_name])\n
 for i in indexed_column_indices: # Fix data from indexed columns\n
 index_data[i] = tuple(index_data[i]) # Convert from list to tuple (which is hashable)\n
 index.append(tuple(index_data))\n
 # Determine the names for our index and columns of our output table if this is the first row.\n # These are constant for all rows so we only need to do this once for the first row.\n
 if row_index == 0:\n
 index_names = ([(self.name, 'id')] +\n [(self.name, colname)\n for colname in self.colnames if colname != hcol_name])\n
 if flat_column_index:\n columns = [(hcol_target.name, 'id'), ] + list(row_df.columns)\n
 else:\n columns = pd.MultiIndex.from_tuples([(hcol_target.name, 'id'), ] +\n [(hcol_target.name, c) for c in row_df.columns],\n names=('source_table', 'label'))\n
 # if we had an empty data table then at least define the columns\n
 if index_names is None:\n
 index_names = ([(self.name, 'id')] +\n [(self.name, colname)\n for colname in self.colnames if colname != hcol_name])\n
 if flat_column_index:\n columns = [(hcol_target.name, 'id'), ] + list(hcol_target.colnames)\n
 else:\n columns = pd.MultiIndex.from_tuples([(hcol_target.name, 'id'), ] +\n [(hcol_target.name, c) for c in hcol_target.colnames],\n names=('source_table', 'label'))\n\n
 # Case 2: Our DynamicTableRegion column points to another HierarchicalDynamicTable.\n
 else:\n
 # 1) First we need to recursively flatten the hierarchy by calling 'to_hierarchical_dataframe()'\n # (i.e., this function) on the target of our hierarchical column\n
 hcol_hdf = hcol_target.to_hierarchical_dataframe(flat_column_index=flat_column_index)\n
 # 2) Iterate over all rows in our hierarchical column (i.e., the DynamicTableRegion column)\n
 for row_index, row_df_level1 in enumerate(hcol[:]): # need hcol[:] here in case this is an h5py.Dataset\n
 # 1.1): Since hcol is a DynamicTableRegion, each row returns another DynamicTable so we\n # next need to iterate over all rows in that table to denormalize our data\n
 for row_df_level2 in row_df_level1.itertuples(index=True):\n
 # 1.1.2) Since our target is itself a HierarchicalDynamicTable each target row itself\n # may expand into multiple rows in flattened hcol_hdf. So we now need to look\n # up the rows in hcol_hdf that correspond to the rows in row_df_level2.\n # NOTE: In this look-up we assume that the ids (and hence the index) of\n # each row in the table are in fact unique.\n
 for row_tuple_level3 in hcol_hdf.loc[[row_df_level2[0]]].itertuples(index=True):\n
 # 1.1.2.1) Determine the column data for our row.\n
 data.append(row_tuple_level3[1:])\n
 # 1.1.2.2) Determine the multi-index tuple for our row,\n
 index_data = ([self.id[row_index], ] +\n [self[row_index, colname] for colname in self.colnames if colname != hcol_name] +\n list(row_tuple_level3[0]))\n
 for i in indexed_column_indices: # Fix data from indexed columns\n
 index_data[i] = tuple(index_data[i]) # Convert from list to tuple (which is hashable)\n
 index.append(tuple(index_data))\n
 # Determine the names for our index and columns of our output table if this is the first row\n
 if row_index == 0:\n
 index_names = ([(self.name, \"id\")] +\n [(self.name, colname)\n for colname in self.colnames if colname != hcol_name] +\n hcol_hdf.index.names)\n
 columns = hcol_hdf.columns\n
 # if we had an empty table, then at least define the columns\n
 if index_names is None:\n
 index_names = ([(self.name, \"id\")] +\n [(self.name, colname)\n for colname in self.colnames if colname != hcol_name] +\n hcol_hdf.index.names)\n
 columns = hcol_hdf.columns\n\n
 # Construct the pandas dataframe with the hierarchical multi-index\n
 multi_index = pd.MultiIndex.from_tuples(index, names=index_names)\n
 out_df = pd.DataFrame(data=data, index=multi_index, columns=columns)\n
 return out_df\n
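\n\n# --- Illustrative usage sketch (added for clarity; not part of the original module). ---\n
# A minimal example of how the flattening above might be used; 'ancestor_table' is a\n
# hypothetical DynamicTable that uses HierarchicalDynamicTableMixin and already holds data.\n
def _sketch_hierarchy_flattening(ancestor_table):\n
 # MultiIndex rows keep the hierarchy; the leaf table provides the columns\n
 hier_df = ancestor_table.to_hierarchical_dataframe()\n
 # Resetting the index moves the hierarchy levels back into regular columns\n
 flat_df = hier_df.reset_index()\n
 return hier_df, flat_df\n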
\n\n@register_class('AlignedDynamicTable', namespace)\nclass AlignedDynamicTable(DynamicTable):\n \"\"\"\n
 DynamicTable container that supports storing a collection of subtables. Each sub-table is a\n DynamicTable itself that is aligned with the main table by row index. I.e., all\n DynamicTables stored in this group MUST have the same number of rows. This type effectively\n defines a 2-level table in which the main data is stored in the main table implemented by this type\n and additional columns of the table are grouped into categories, with each category\n represented by a separate DynamicTable stored within the group.\n \"\"\"\n
 __fields__ = ({'name': 'category_tables', 'child': True}, )\n\n
 @docval(*get_docval(DynamicTable.__init__),\n {'name': 'category_tables', 'type': list,\n 'doc': 'List of DynamicTables to be added to the container', 'default': None},\n {'name': 'categories', 'type': 'array_data',\n 'doc': 'List of names with the ordering of category tables', 'default': None})\n
 def __init__(self, **kwargs):\n
 in_category_tables = popargs('category_tables', kwargs)\n
 in_categories = popargs('categories', kwargs)\n
 if in_categories is None and in_category_tables is not None:\n
 in_categories = [tab.name for tab in in_category_tables]\n
 if in_categories is not None and in_category_tables is None:\n
 raise ValueError(\"Categories provided but no category_tables given\")\n
 # at this point both in_categories and in_category_tables should either both be None or both be a list\n
 if in_categories is not None:\n
 if len(in_categories) != len(in_category_tables):\n
 raise ValueError(\"%s category_tables given but %s categories specified\" %\n (len(in_category_tables), len(in_categories)))\n
 # Initialize the main dynamic table\n
 call_docval_func(super().__init__, kwargs)\n
 # Create and set all sub-categories\n
 dts = OrderedDict()\n
 # Add the custom categories given as inputs\n
 if in_category_tables is not None:\n
 # We may need to resize our main table when adding categories as the user may not have set ids\n
 if len(in_category_tables) > 0:\n
 # We have categories to process\n
 if len(self.id) == 0:\n
 # The user did not initialize our main table ids nor set columns for our main table\n
 for i in range(len(in_category_tables[0])):\n
 self.id.append(i)\n
 # Add the user-provided categories in the correct order as described by the categories\n
 # This is necessary, because we do not store the categories explicitly but we maintain them\n
 # as the order of our self.category_tables. This makes sure look-ups are consistent.\n
 lookup_index = OrderedDict([(k, -1) for k in in_categories])\n
 for i, v in enumerate(in_category_tables):\n
 # Error check that the name of the table is in our categories list\n
 if v.name not in lookup_index:\n
 raise ValueError(\"DynamicTable %s does not appear in categories %s\" % (v.name, str(in_categories)))\n
 # Error check to make sure no two tables with the same name are given\n
 if lookup_index[v.name] >= 0:\n
 raise ValueError(\"Duplicate table name %s found in input dynamic_tables\" % v.name)\n
 lookup_index[v.name] = i\n
 for table_name, table_index in lookup_index.items():\n
 # This error case should not be able to occur since the length of the in_categories and\n # in_category_tables must match and we made sure that each DynamicTable we added had its\n # name in the in_categories list. We, therefore, exclude this check from coverage testing\n # but we leave it in just as a backup trigger in case something unexpected happens\n
 if table_index < 0: # pragma: no cover\n
 raise ValueError(\"DynamicTable %s listed in categories but does not appear in category_tables\" %\n table_name) # pragma: no cover\n
 # Test that all category tables have the correct number of rows\n
 category = in_category_tables[table_index]\n
 if len(category) != len(self):\n
 raise ValueError('Category DynamicTable %s does not align, it has %i rows expected %i' %\n (category.name, len(category), len(self)))\n
 # Add the category table to our category_tables.\n
 dts[category.name] = category\n
 # Set the self.category_tables attribute, which will set the parent/child relationships for the category_tables\n
 self.category_tables = dts\n\n
 def __contains__(self, val):\n
 \"\"\"\n Check if the given value (i.e., column) exists in this table\n\n :param val: If val is a string then check if the given category exists. If val is a tuple\n of two strings (category, colname) then check for the given category if the given colname exists.\n \"\"\"\n
 if isinstance(val, str):\n
 return val in self.category_tables or val in self.colnames\n
 elif isinstance(val, tuple):\n
 if len(val) != 2:\n
 raise ValueError(\"Expected tuple of strings of length 2, got tuple of length %i\" % len(val))\n
 return val[1] in self.get_category(val[0])\n
 else:\n
 return False\n\n
 @property\n def categories(self):\n
 \"\"\"\n Get the list of names of the categories\n\n Short-hand for list(self.category_tables.keys())\n \"\"\"\n
 return list(self.category_tables.keys())\n\n
 @docval({'name': 'category', 'type': DynamicTable, 'doc': 'Add a new DynamicTable category'},)\n
 def add_category(self, **kwargs):\n
 \"\"\"\n Add a new DynamicTable to the AlignedDynamicTable to create a new category in the table.\n\n NOTE: The table must align with (i.e., have the same number of rows as) the main data table (and\n other category tables). I.e., if the AlignedDynamicTable is already populated with data\n then we have to populate the new category with the corresponding data before adding it.\n\n :raises: ValueError if the input table does not have the same number of rows as the main table\n \"\"\"\n
 category = getargs('category', kwargs)\n
 if len(category) != len(self):\n
 raise ValueError('New category DynamicTable does not align, it has %i rows expected %i' %\n (len(category), len(self)))\n
 if category.name in self.category_tables:\n
 raise ValueError(\"Category %s already in the table\" % category.name)\n
 self.category_tables[category.name] = category\n
 category.parent = self\n\n
 @docval({'name': 'name', 'type': str, 'doc': 'Name of the category we want to retrieve', 'default': None})\n
 def get_category(self, **kwargs):\n
 name = popargs('name', kwargs)\n
 if name is None or (name not in self.category_tables and name == self.name):\n
 return self\n
 else:\n
 return self.category_tables[name]\n\n
 @docval(*get_docval(DynamicTable.add_column),\n {'name': 'category', 'type': str, 'doc': 'The category the column should be added to',\n 'default': None})\n
 def add_column(self, **kwargs):\n
 \"\"\"\n Add a column to the table\n\n :raises: KeyError if the category does not exist\n\n \"\"\"\n
 category_name = popargs('category', kwargs)\n
 if category_name is None:\n
 # Add the column to our main table\n
 call_docval_func(super().add_column, kwargs)\n
 else:\n
 # Add the column to a sub-category table\n
 try:\n
 category = self.get_category(category_name)\n
 except KeyError:\n
 raise KeyError(\"Category %s not in table\" % category_name)\n
 category.add_column(**kwargs)\n\n
 @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None},\n {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None},\n {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique',\n 'default': False},\n allow_extra=True)\n
 def add_row(self, **kwargs):\n
 \"\"\"\n We can either provide the row data as a single dict or by specifying a dict for each category\n \"\"\"\n
 data, row_id, enforce_unique_id = popargs('data', 'id', 'enforce_unique_id', kwargs)\n
 data = data if data is not None else kwargs\n\n
 # extract the category data\n
 category_data = {k: data.pop(k) for k in self.categories if k in data}\n\n
 # Check that we have the appropriate categories provided\n
 missing_categories = set(self.categories) - set(list(category_data.keys()))\n
 if missing_categories:\n
 raise KeyError(\n '\\n'.join([\n 'row data keys don\'t match available categories',\n 'missing {} category keys: {}'.format(len(missing_categories), missing_categories)\n ])\n )\n
 # Add the data to our main dynamic table\n
 data['id'] = row_id\n
 data['enforce_unique_id'] = enforce_unique_id\n
 call_docval_func(super().add_row, data)\n\n
 # Add the data to all our dynamic table categories\n
 for category, values in category_data.items():\n
 self.category_tables[category].add_row(**values)\n\n
 @docval({'name': 'ignore_category_ids', 'type': bool,\n 'doc': \"Ignore id columns of sub-category tables\", 'default': False})\n
 def to_dataframe(self, **kwargs):\n
 \"\"\"Convert the collection of tables to a single pandas DataFrame\"\"\"\n
 dfs = [super().to_dataframe().reset_index(), ]\n
 if getargs('ignore_category_ids', kwargs):\n
 dfs += [category.to_dataframe() for category in self.category_tables.values()]\n
 else:\n
 dfs += [category.to_dataframe().reset_index() for category in self.category_tables.values()]\n
 names = [self.name, ] + list(self.category_tables.keys())\n
 res = pd.concat(dfs, axis=1, keys=names)\n
 res.set_index((self.name, 'id'), drop=True, inplace=True)\n
 return res\n\n
 def __getitem__(self, item):\n
 \"\"\"\n If item is:\n\n * int : Return a single row of the table\n * string : Return a single category of the table\n * tuple: Get a column, row, or cell from a particular category. The tuple is expected to consist\n of (category, selection) where category may be a string with the name of the sub-category\n or None (or the name of this AlignedDynamicTable) if we want to slice into the main table.\n\n :returns: DataFrame when retrieving a row or category. Returns scalar when selecting a cell.\n Returns a VectorData/VectorIndex when retrieving a single column.\n \"\"\"\n
 if isinstance(item, (int, list, np.ndarray, slice)):\n
 # get a single full row from all tables\n
 dfs = ([super().__getitem__(item).reset_index(), ] +\n [category[item].reset_index() for category in self.category_tables.values()])\n
 names = [self.name, ] + list(self.category_tables.keys())\n
 res = pd.concat(dfs, axis=1, keys=names)\n
 res.set_index((self.name, 'id'), drop=True, inplace=True)\n
 return res\n
 elif isinstance(item, str) or item is None:\n
 if item in self.colnames:\n
 # get a specific column\n
 return super().__getitem__(item)\n
 else:\n
 # get a single category\n
 return self.get_category(item).to_dataframe()\n
 elif isinstance(item, tuple):\n
 if len(item) == 2:\n
 return self.get_category(item[0])[item[1]]\n
 elif len(item) == 3:\n
 return self.get_category(item[0])[item[1]][item[2]]\n
 else:\n
 raise ValueError(\"Expected tuple of length 2 or 3 with (category, column, row) as value.\")\n
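\n\n# --- Illustrative usage sketch (added for clarity; not part of the original module). ---\n
# A minimal sketch of building an AlignedDynamicTable; the table and column names here are\n
# hypothetical, and exact DynamicTable constructor signatures may vary across hdmf versions.\n
def _sketch_aligned_table_usage():\n
 category = DynamicTable(name='cat1', description='example category table')\n
 category.add_column(name='foo', description='example data column')\n
 table = AlignedDynamicTable(name='main', description='example main table',\n category_tables=[category])\n
 # Row data may be given as a single dict keyed by category (as below) or by passing\n
 # one keyword argument per category, e.g. table.add_row(cat1={'foo': 1.0})\n
 table.add_row(data={'cat1': {'foo': 1.0}})\n
 return table\n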
\n\n@register_class('TimeSeriesReferenceVectorData', namespace)\nclass TimeSeriesReferenceVectorData(VectorData):\n
 \"\"\"\n Column storing references to a TimeSeries (rows). For each TimeSeries this VectorData\n column stores the start_index and count to indicate the range in time to be selected\n as well as an object reference to the TimeSeries.\n \"\"\"\n\n
 @docval(*get_docval(VectorData.__init__))\n
 def __init__(self, **kwargs):\n
 call_docval_func(super().__init__, kwargs)\n\n\n
@register_class('IntracellularElectrodesTable', namespace)\nclass IntracellularElectrodesTable(DynamicTable):\n
 \"\"\"\n Table for storing intracellular electrode related metadata\n \"\"\"\n
 __columns__ = (\n {'name': 'electrode',\n 'description': 'Column for storing the reference to the intracellular electrode',\n 'required': True,\n 'index': False,\n 'table': False},\n )\n\n
 @docval(*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 # Define default name and description settings\n
 kwargs['name'] = 'electrodes'\n
 kwargs['description'] = ('Table for storing intracellular electrode related metadata')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n\n\n
@register_class('IntracellularStimuliTable', namespace)\nclass IntracellularStimuliTable(DynamicTable):\n
 \"\"\"\n Table for storing intracellular stimulus related metadata\n \"\"\"\n
 __columns__ = (\n {'name': 'stimulus',\n 'description': 'Column storing the reference to the recorded stimulus for the recording (rows)',\n 'required': True,\n 'index': False,\n 'table': False,\n 'class': TimeSeriesReferenceVectorData},\n )\n\n
 @docval(*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 # Define default name and description settings\n
 kwargs['name'] = 'stimuli'\n
 kwargs['description'] = ('Table for storing intracellular stimulus related metadata')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n\n\n
@register_class('IntracellularResponsesTable', namespace)\nclass IntracellularResponsesTable(DynamicTable):\n
 \"\"\"\n Table for storing intracellular response related metadata\n \"\"\"\n
 __columns__ = (\n {'name': 'response',\n 'description': 'Column storing the reference to the recorded response for the recording (rows)',\n 'required': True,\n 'index': False,\n 'table': False,\n 'class': TimeSeriesReferenceVectorData},\n )\n\n
 @docval(*get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 # Define default name and description settings\n
 kwargs['name'] = 'responses'\n
 kwargs['description'] = ('Table for storing intracellular response related metadata')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n
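\n\n# --- Illustrative sketch (added for clarity; not part of the original module). ---\n
# Rows of a TimeSeriesReferenceVectorData column are (start_index, index_count, TimeSeries)\n
# triples; this hypothetical helper packages a reference to a full series the same way\n
# IntracellularRecordingsTable.add_recording (below) populates the stimulus/response columns.\n
def _sketch_timeseries_reference(time_series):\n
 return (0, time_series.num_samples, time_series)\n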
\n\n@register_class('IntracellularRecordingsTable', namespace)\nclass IntracellularRecordingsTable(AlignedDynamicTable):\n
 \"\"\"\n A table to group together a stimulus and response from a single electrode and\n a single simultaneous_recording. Each row in the table represents a single recording consisting\n typically of a stimulus and a corresponding response.\n \"\"\"\n
 @docval(*get_docval(AlignedDynamicTable.__init__, 'id', 'columns', 'colnames', 'category_tables', 'categories'))\n
 def __init__(self, **kwargs):\n
 kwargs['name'] = 'intracellular_recordings'\n
 kwargs['description'] = ('A table to group together a stimulus and response from a single electrode '\n 'and a single simultaneous recording and for storing metadata about the '\n 'intracellular recording.')\n
 in_category_tables = getargs('category_tables', kwargs)\n
 if in_category_tables is None or len(in_category_tables) == 0:\n
 kwargs['category_tables'] = [IntracellularElectrodesTable(),\n IntracellularStimuliTable(),\n IntracellularResponsesTable()]\n
 kwargs['categories'] = None\n
 else:\n
 # Check if our required data tables are supplied, otherwise add them to the list\n
 required_dynamic_table_given = [-1 for i in range(3)] # The first three are our required tables\n
 for i, tab in enumerate(in_category_tables):\n
 if isinstance(tab, IntracellularElectrodesTable):\n
 required_dynamic_table_given[0] = i\n
 elif isinstance(tab, IntracellularStimuliTable):\n
 required_dynamic_table_given[1] = i\n
 elif isinstance(tab, IntracellularResponsesTable):\n
 required_dynamic_table_given[2] = i\n
 # Check if the supplied tables contain data but not all required tables have been supplied\n
 required_dynamic_table_missing = np.any(np.array(required_dynamic_table_given[0:3]) < 0)\n
 if len(in_category_tables[0]) != 0 and required_dynamic_table_missing:\n
 raise ValueError(\"IntracellularElectrodesTable, IntracellularStimuliTable, and \"\n \"IntracellularResponsesTable are required when adding custom, non-empty \"\n \"tables to IntracellularRecordingsTable as the missing data for the required \"\n \"tables cannot be determined automatically\")\n
 # Compile the complete list of tables\n
 dynamic_table_arg = copy(in_category_tables)\n
 categories_arg = [] if getargs('categories', kwargs) is None else copy(getargs('categories', kwargs))\n
 if required_dynamic_table_missing:\n
 if required_dynamic_table_given[2] < 0:\n
 dynamic_table_arg.append(IntracellularResponsesTable())\n
 if not dynamic_table_arg[-1].name in categories_arg:\n
 categories_arg.insert(0, dynamic_table_arg[-1].name)\n
 if required_dynamic_table_given[1] < 0:\n
 dynamic_table_arg.append(IntracellularStimuliTable())\n
 if not dynamic_table_arg[-1].name in categories_arg:\n
 categories_arg.insert(0, dynamic_table_arg[-1].name)\n
 if required_dynamic_table_given[0] < 0:\n
 dynamic_table_arg.append(IntracellularElectrodesTable())\n
 if not dynamic_table_arg[-1].name in categories_arg:\n
 categories_arg.insert(0, dynamic_table_arg[-1].name)\n
 kwargs['category_tables'] = dynamic_table_arg\n
 kwargs['categories'] = categories_arg\n\n
 call_docval_func(super().__init__, kwargs)\n\n
 @docval({'name': 'electrode', 'type': IntracellularElectrode, 'doc': 'The intracellular electrode used'},\n
 {'name': 'stimulus_start_index', 'type': 'int', 'doc': 'Start index of the stimulus', 'default': -1},\n
 {'name': 'stimulus_index_count', 'type': 'int', 'doc': 'Number of stimulus samples (count)', 'default': -1},\n
 {'name': 'stimulus', 'type': TimeSeries,\n 'doc': 'The TimeSeries (usually a PatchClampSeries) with the stimulus',\n 'default': None},\n
 {'name': 'response_start_index', 'type': 'int', 'doc': 'Start index of the response', 'default': -1},\n
 {'name': 'response_index_count', 'type': 'int', 'doc': 'Number of response samples (count)', 'default': -1},\n
 {'name': 'response', 'type': TimeSeries,\n 'doc': 'The TimeSeries (usually a PatchClampSeries) with the response',\n 'default': None},\n
 {'name': 'electrode_metadata', 'type': dict,\n 'doc': 'Additional electrode metadata to be stored in the electrodes table', 'default': None},\n
 {'name': 'stimulus_metadata', 'type': dict,\n 'doc': 'Additional stimulus metadata to be stored in the stimuli table', 'default': None},\n
 {'name': 'response_metadata', 'type': dict,\n 'doc': 'Additional response metadata to be stored in the responses table', 'default': None},\n
 returns='Integer index of the row that was added to this table',\n rtype=int,\n allow_extra=True)\n
 def add_recording(self, **kwargs):\n
 \"\"\"\n Add a single recording to the IntracellularRecordingsTable table.\n\n Typically, both stimulus and response are expected. However, in some cases only a stimulus\n or a response may be recorded as part of a recording. In this case, None may be given\n for either stimulus or response, but not both. Internally, this results in both stimulus\n and response pointing to the same timeseries, while the start_index and index_count for\n the invalid series will both be set to -1.\n \"\"\"\n
 # Get the input data\n
 stimulus_start_index, stimulus_index_count, stimulus = popargs('stimulus_start_index',\n 'stimulus_index_count',\n 'stimulus',\n kwargs)\n
 response_start_index, response_index_count, response = popargs('response_start_index',\n 'response_index_count',\n 'response',\n kwargs)\n
 electrode = popargs('electrode', kwargs)\n
 # Confirm that we have at least a valid stimulus or response\n
 if stimulus is None and response is None:\n
 raise ValueError(\"stimulus and response cannot both be None.\")\n\n
 # Compute the start and stop index if necessary\n
 if stimulus is not None:\n
 stimulus_start_index, stimulus_index_count = self.__compute_index(stimulus_start_index,\n stimulus_index_count,\n stimulus, 'stimulus')\n
 if response is not None:\n
 response_start_index, response_index_count = self.__compute_index(response_start_index,\n response_index_count,\n response, 'response')\n\n
 # If either stimulus or response are None, then set them to the same TimeSeries to keep the I/O happy\n
 response = response if response is not None else stimulus\n
 stimulus = stimulus if stimulus is not None else response\n\n
 # Make sure the types are compatible\n
 if ((response.neurodata_type.startswith(\"CurrentClamp\") and\n stimulus.neurodata_type.startswith(\"VoltageClamp\")) or\n (response.neurodata_type.startswith(\"VoltageClamp\") and\n stimulus.neurodata_type.startswith(\"CurrentClamp\"))):\n
 raise ValueError(\"Incompatible types given for 'stimulus' and 'response' parameters. \"\n \"'stimulus' is of type %s and 'response' is of type %s.\" %\n (stimulus.neurodata_type, response.neurodata_type))\n
 if response.neurodata_type == 'IZeroClampSeries':\n
 if stimulus is not None:\n
 raise ValueError(\"stimulus should usually be None for IZeroClampSeries response\")\n
 if isinstance(response, PatchClampSeries) and isinstance(stimulus, PatchClampSeries):\n
 # # We could also check sweep_number, but since it is mostly relevant to the deprecated SweepTable\n
 # # we don't really need to enforce it here\n
 # if response.sweep_number != stimulus.sweep_number:\n
 # warnings.warn(\"sweep_numbers are usually expected to be the same for PatchClampSeries type \"\n # \"stimulus and response pairs in an intracellular recording.\")\n
 if response.electrode != stimulus.electrode:\n
 raise ValueError(\"electrodes are usually expected to be the same for PatchClampSeries type \"\n \"stimulus and response pairs in an intracellular recording.\")\n\n
 # Compile the electrodes table data\n
 electrodes = copy(popargs('electrode_metadata', kwargs))\n
 if electrodes is None:\n
 electrodes = {}\n
 electrodes['electrode'] = electrode\n\n
 # Compile the stimuli table data\n
 stimuli = copy(popargs('stimulus_metadata', kwargs))\n
 if stimuli is None:\n
 stimuli = {}\n
 stimuli['stimulus'] = (stimulus_start_index, stimulus_index_count, stimulus)\n\n
 # Compile the responses table data\n
 responses = copy(popargs('response_metadata', kwargs))\n
 if responses is None:\n
 responses = {}\n
 responses['response'] = (response_start_index, response_index_count, response)\n\n
 _ = super().add_row(enforce_unique_id=True,\n electrodes=electrodes,\n responses=responses,\n stimuli=stimuli,\n **kwargs)\n
 return len(self) - 1\n\n
 @staticmethod\n def __compute_index(start_index, index_count, time_series, name):\n
 start_index = start_index if start_index >= 0 else 0\n
 num_samples = time_series.num_samples\n
 index_count = (index_count\n if index_count >= 0\n else ((num_samples - start_index)\n if num_samples is not None\n else None))\n
 if index_count is None:\n
 raise IndexError(\"Invalid %s_index_count cannot be determined from %s data.\" % (name, name))\n
 if num_samples is not None:\n
 if start_index >= num_samples:\n
 raise IndexError(\"%s_start_index out of range\" % name)\n
 if (start_index + index_count) > num_samples:\n
 raise IndexError(\"%s_start_index + %s_index_count out of range\" % (name, name))\n
 return start_index, index_count\n\n
 @docval(*get_docval(AlignedDynamicTable.to_dataframe, 'ignore_category_ids'),\n
 {'name': 'electrode_refs_as_objectids', 'type': bool,\n 'doc': 'replace object references in the electrode column with object_ids',\n 'default': False},\n
 {'name': 'stimulus_refs_as_objectids', 'type': bool,\n 'doc': 'replace object references in the stimulus column with object_ids',\n 'default': False},\n
 {'name': 'response_refs_as_objectids', 'type': bool,\n 'doc': 'replace object references in the response column with object_ids',\n 'default': False}\n )\n
 def to_dataframe(self, **kwargs):\n
 \"\"\"Convert the collection of tables to a single pandas DataFrame\"\"\"\n
 res = super().to_dataframe(ignore_category_ids=getargs('ignore_category_ids', kwargs))\n
 if getargs('electrode_refs_as_objectids', kwargs):\n
 res[('electrodes', 'electrode')] = [e.object_id for e in res[('electrodes', 'electrode')]]\n
 if getargs('stimulus_refs_as_objectids', kwargs):\n
 res[('stimuli', 'stimulus')] = [(e[0], e[1], e[2].object_id) for e in res[('stimuli', 'stimulus')]]\n
 if getargs('response_refs_as_objectids', kwargs):\n
 res[('responses', 'response')] = [(e[0], e[1], e[2].object_id) for e in res[('responses', 'response')]]\n
 return res\n
\n\n@register_class('SimultaneousRecordingsTable', namespace)\nclass SimultaneousRecordingsTable(HierarchicalDynamicTableMixin, DynamicTable):\n
 \"\"\"\n A table for grouping different intracellular recordings from the\n IntracellularRecordingsTable table together that were recorded simultaneously\n from different electrodes.\n \"\"\"\n\n
 __columns__ = (\n {'name': 'recordings',\n 'description': 'Column with references to one or more rows in the IntracellularRecordingsTable table',\n 'required': True,\n 'index': True,\n 'table': True},\n )\n\n
 @docval({'name': 'intracellular_recordings_table',\n 'type': IntracellularRecordingsTable,\n 'doc': 'the IntracellularRecordingsTable table that the recordings column indexes. May be None when '\n 'reading the Container from file as the table attribute is already populated in this case '\n 'but otherwise this is required.',\n 'default': None},\n *get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 intracellular_recordings_table = popargs('intracellular_recordings_table', kwargs)\n
 # Define default name and description settings\n
 kwargs['name'] = 'simultaneous_recordings'\n
 kwargs['description'] = ('A table for grouping different intracellular recordings from the '\n 'IntracellularRecordingsTable table together that were recorded simultaneously '\n 'from different electrodes.')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n
 if self['recordings'].target.table is None:\n
 if intracellular_recordings_table is not None:\n
 self['recordings'].target.table = intracellular_recordings_table\n
 else:\n
 raise ValueError(\"intracellular_recordings_table constructor argument required\")\n\n
 @docval({'name': 'recordings',\n 'type': 'array_data',\n 'doc': 'the indices of the recordings belonging to this simultaneous recording'},\n returns='Integer index of the row that was added to this table',\n rtype=int,\n allow_extra=True)\n
 def add_simultaneous_recording(self, **kwargs):\n
 \"\"\"\n Add a single simultaneous recording consisting of one-or-more recordings and associated custom\n SimultaneousRecordingsTable metadata to the table.\n \"\"\"\n
 _ = super().add_row(enforce_unique_id=True, **kwargs)\n
 return len(self.id) - 1\n\n\n
@register_class('SequentialRecordingsTable', namespace)\nclass SequentialRecordingsTable(HierarchicalDynamicTableMixin, DynamicTable):\n
 \"\"\"\n A table for grouping different intracellular recording simultaneous_recordings from the\n SimultaneousRecordingsTable table together. This is typically used to group together simultaneous_recordings\n where a sequence of stimuli of the same type with varying parameters\n has been presented in a sequence.\n \"\"\"\n\n
 __columns__ = (\n {'name': 'simultaneous_recordings',\n 'description': 'Column with references to one or more rows in the SimultaneousRecordingsTable table',\n 'required': True,\n 'index': True,\n 'table': True},\n
 {'name': 'stimulus_type',\n 'description': 'Column storing the type of stimulus used for the sequential recording',\n 'required': True,\n 'index': False,\n 'table': False}\n )\n\n
 @docval({'name': 'simultaneous_recordings_table',\n 'type': SimultaneousRecordingsTable,\n 'doc': 'the SimultaneousRecordingsTable table that the simultaneous_recordings '\n 'column indexes. May be None when reading the Container from file as the '\n 'table attribute is already populated in this case but otherwise this is required.',\n 'default': None},\n *get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 simultaneous_recordings_table = popargs('simultaneous_recordings_table', kwargs)\n
 # Define default name and description settings\n
 kwargs['name'] = 'sequential_recordings'\n
 kwargs['description'] = ('A table for grouping different intracellular recording simultaneous_recordings '\n 'from the SimultaneousRecordingsTable table together. This is typically used to '\n 'group together simultaneous_recordings where a sequence of stimuli of the '\n 'same type with varying parameters has been presented in a sequence.')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n
 if self['simultaneous_recordings'].target.table is None:\n
 if simultaneous_recordings_table is not None:\n
 self['simultaneous_recordings'].target.table = simultaneous_recordings_table\n
 else:\n
 raise ValueError('simultaneous_recordings_table constructor argument required')\n\n
 @docval({'name': 'stimulus_type',\n 'type': str,\n 'doc': 'the type of stimulus used for the sequential recording'},\n
 {'name': 'simultaneous_recordings',\n 'type': 'array_data',\n 'doc': 'the indices of the simultaneous_recordings belonging to this sequential recording'},\n
 returns='Integer index of the row that was added to this table',\n rtype=int,\n allow_extra=True)\n
 def add_sequential_recording(self, **kwargs):\n
 \"\"\"\n Add a sequential recording (i.e., one row) consisting of one-or-more recording simultaneous_recordings\n and associated custom sequential recording metadata to the table.\n \"\"\"\n
 _ = super().add_row(enforce_unique_id=True, **kwargs)\n
 return len(self.id) - 1\n\n\n
@register_class('RepetitionsTable', namespace)\nclass RepetitionsTable(HierarchicalDynamicTableMixin, DynamicTable):\n
 \"\"\"\n A table for grouping different intracellular recording sequential recordings together.\n With each sequential recording typically representing a particular type of stimulus, the\n RepetitionsTable table is typically used to group sets of stimuli applied in sequence.\n \"\"\"\n\n
 __columns__ = (\n {'name': 'sequential_recordings',\n 'description': 'Column with references to one or more rows in the SequentialRecordingsTable table',\n 'required': True,\n 'index': True,\n 'table': True},\n )\n\n
 @docval({'name': 'sequential_recordings_table',\n 'type': SequentialRecordingsTable,\n 'doc': 'the SequentialRecordingsTable table that the sequential_recordings column indexes. May '\n 'be None when reading the Container from file as the table attribute is already populated '\n 'in this case but otherwise this is required.',\n 'default': None},\n *get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 sequential_recordings_table = popargs('sequential_recordings_table', kwargs)\n
 # Define default name and description settings\n
 kwargs['name'] = 'repetitions'\n
 kwargs['description'] = ('A table for grouping different intracellular recording sequential recordings '\n 'together. With each sequential recording typically representing a particular type '\n 'of stimulus, the RepetitionsTable table is typically used to group sets '\n 'of stimuli applied in sequence.')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n
 if self['sequential_recordings'].target.table is None:\n
 if sequential_recordings_table is not None:\n
 self['sequential_recordings'].target.table = sequential_recordings_table\n
 else:\n
 raise ValueError('sequential_recordings_table constructor argument required')\n\n
 @docval({'name': 'sequential_recordings',\n 'type': 'array_data',\n 'doc': 'the indices of the sequential recordings belonging to this repetition',\n 'default': None},\n returns='Integer index of the row that was added to this table',\n rtype=int,\n allow_extra=True)\n
 def add_repetition(self, **kwargs):\n
 \"\"\"\n Add a repetition (i.e., one row) consisting of one-or-more recording sequential recordings\n and associated custom repetition metadata to the table.\n \"\"\"\n
 _ = super().add_row(enforce_unique_id=True, **kwargs)\n
 return len(self.id) - 1\n\n\n
@register_class('ExperimentalConditionsTable', namespace)\nclass ExperimentalConditionsTable(HierarchicalDynamicTableMixin, DynamicTable):\n
 \"\"\"\n A table for grouping different intracellular recording repetitions together that\n belong to the same experimental conditions.\n \"\"\"\n\n
 __columns__ = (\n {'name': 'repetitions',\n 'description': 'Column with references to one or more rows in the RepetitionsTable table',\n 'required': True,\n 'index': True,\n 'table': True},\n )\n\n
 @docval({'name': 'repetitions_table',\n 'type': RepetitionsTable,\n 'doc': 'the RepetitionsTable table that the repetitions column indexes',\n 'default': None},\n *get_docval(DynamicTable.__init__, 'id', 'columns', 'colnames'))\n
 def __init__(self, **kwargs):\n
 repetitions_table = popargs('repetitions_table', kwargs)\n
 # Define default name and description settings\n
 kwargs['name'] = 'experimental_conditions'\n
 kwargs['description'] = ('A table for grouping different intracellular recording repetitions together that '\n 'belong to the same experimental conditions.')\n
 # Initialize the DynamicTable\n
 call_docval_func(super().__init__, kwargs)\n
 if self['repetitions'].target.table is None:\n
 if repetitions_table is not None:\n
 self['repetitions'].target.table = repetitions_table\n
 else:\n
 raise ValueError('repetitions_table constructor argument required')\n\n
 @docval({'name': 'repetitions',\n 'type': 'array_data',\n 'doc': 'the indices of the repetitions belonging to this condition',\n 'default': None},\n returns='Integer index of the row that was added to this table',\n rtype=int,\n allow_extra=True)\n
 def add_experimental_condition(self, **kwargs):\n
 \"\"\"\n Add a condition (i.e., one row) consisting of one-or-more recording repetitions of sequential recordings\n and associated custom experimental_conditions metadata to the table.\n \"\"\"\n
 _ = super().add_row(enforce_unique_id=True, **kwargs)\n
 return len(self.id) - 1\n
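\n\n# --- Illustrative usage sketch (added for clarity; not part of the original module). ---\n
# A minimal sketch of wiring up the full metadata hierarchy on top of an existing\n
# IntracellularRecordingsTable; each constructor receives the next-lower table so that\n
# its DynamicTableRegion column knows its target.\n
def _sketch_table_hierarchy(intracellular_recordings):\n
 simultaneous = SimultaneousRecordingsTable(intracellular_recordings)\n
 sequential = SequentialRecordingsTable(simultaneous)\n
 repetitions = RepetitionsTable(sequential)\n
 conditions = ExperimentalConditionsTable(repetitions)\n
 return conditions\n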
\n\n@register_class('ICEphysFile', namespace)\nclass ICEphysFile(NWBFile):\n
 \"\"\"\n Extension of the NWBFile class to allow placing the new icephys\n metadata types in /general/intracellular_ephys in the NWBFile\n NOTE: If this proposal for extension to NWB gets merged with\n the core schema, then this type would be removed and the\n NWBFile specification updated instead\n \"\"\"\n\n
 __nwbfields__ = ({'name': 'intracellular_recordings',\n 'child': True,\n 'required_name': 'intracellular_recordings',\n 'doc': 'IntracellularRecordingsTable table to group together a stimulus and response '\n 'from a single intracellular electrode and a single simultaneous recording.'},\n
 {'name': 'icephys_simultaneous_recordings',\n 'child': True,\n 'required_name': 'simultaneous_recordings',\n 'doc': 'SimultaneousRecordingsTable table for grouping different intracellular recordings from '\n 'the IntracellularRecordingsTable table together that were recorded simultaneously '\n 'from different electrodes'},\n
 {'name': 'icephys_sequential_recordings',\n 'child': True,\n 'required_name': 'sequential_recordings',\n 'doc': 'A table for grouping different simultaneous intracellular recordings from the '\n 'SimultaneousRecordingsTable table together. This is typically used to group '\n 'together simultaneous recordings where a sequence of stimuli of the same '\n 'type with varying parameters has been presented in a sequence.'},\n
 {'name': 'icephys_repetitions',\n 'child': True,\n 'required_name': 'repetitions',\n 'doc': 'A table for grouping different intracellular recording sequential recordings together. '\n 'With each sequential recording typically representing a particular type of stimulus, the '\n 'RepetitionsTable table is typically used to group sets of stimuli applied in sequence.'},\n
 {'name': 'icephys_experimental_conditions',\n 'child': True,\n 'required_name': 'experimental_conditions',\n 'doc': 'A table for grouping different intracellular recording repetitions together that '\n 'belong to the same experimental conditions.'},\n )\n\n
 @docval(*get_docval(NWBFile.__init__),\n
 {'name': 'intracellular_recordings', 'type': IntracellularRecordingsTable, 'default': None,\n 'doc': 'the IntracellularRecordingsTable table that belongs to this NWBFile'},\n
 {'name': 'icephys_simultaneous_recordings', 'type': SimultaneousRecordingsTable, 'default': None,\n 'doc': 'the SimultaneousRecordingsTable table that belongs to this NWBFile'},\n
 {'name': 'icephys_sequential_recordings', 'type': SequentialRecordingsTable, 'default': None,\n 'doc': 'the SequentialRecordingsTable table that belongs to this NWBFile'},\n
 {'name': 'icephys_repetitions', 'type': RepetitionsTable, 'default': None,\n 'doc': 'the RepetitionsTable table that belongs to this NWBFile'},\n
 {'name': 'icephys_experimental_conditions', 'type': ExperimentalConditionsTable, 'default': None,\n 'doc': 'the ExperimentalConditionsTable table that belongs to this NWBFile'},\n
 {'name': 'ic_filtering', 'type': str, 'default': None,\n 'doc': '[DEPRECATED] Use IntracellularElectrode.filtering instead. Description of filtering used.'})\n
 def __init__(self, **kwargs):\n
 # Get the arguments to pass to NWBFile and remove arguments custom to this class\n
 intracellular_recordings = kwargs.pop('intracellular_recordings', None)\n
 icephys_simultaneous_recordings = kwargs.pop('icephys_simultaneous_recordings', None)\n
 icephys_sequential_recordings = kwargs.pop('icephys_sequential_recordings', None)\n
 icephys_repetitions = kwargs.pop('icephys_repetitions', None)\n
 icephys_experimental_conditions = kwargs.pop('icephys_experimental_conditions', None)\n
 if kwargs.get('sweep_table') is not None:\n
 warnings.warn(\"Use of SweepTable is deprecated. Use the intracellular_recordings, \"\n \"simultaneous_recordings, sequential_recordings, repetitions and/or \"\n \"experimental_conditions table(s) instead.\", DeprecationWarning)\n
 # Initialize the NWBFile parent class\n
 pargs, pkwargs = fmt_docval_args(super().__init__, kwargs)\n
 super().__init__(*pargs, **pkwargs)\n
 # Set ic filtering if requested\n
 self.ic_filtering = kwargs.get('ic_filtering')\n
 # Set the intracellular_recordings if available\n
 setattr(self, 'intracellular_recordings', intracellular_recordings)\n
 setattr(self, 'icephys_simultaneous_recordings', icephys_simultaneous_recordings)\n
 setattr(self, 'icephys_sequential_recordings', icephys_sequential_recordings)\n
 setattr(self, 'icephys_repetitions', icephys_repetitions)\n
 setattr(self, 'icephys_experimental_conditions', icephys_experimental_conditions)\n\n
 @property\n def ic_filtering(self):\n
 return self.fields.get('ic_filtering')\n\n
 @ic_filtering.setter\n def ic_filtering(self, val):\n
 if val is not None:\n
 warnings.warn(\"Use of ic_filtering is deprecated. Use the IntracellularElectrode.filtering \"\n \"field instead\", DeprecationWarning)\n
 self.fields['ic_filtering'] = val\n\n
 @docval(*get_docval(NWBFile.add_stimulus),\n {'name': 'use_sweep_table', 'type': bool, 'default': False, 'doc': 'Use the deprecated SweepTable'})\n
 def add_stimulus(self, **kwargs):\n
 \"\"\"\n Overwrite behavior from NWBFile to avoid use of the deprecated SweepTable\n \"\"\"\n
 timeseries = popargs('timeseries', kwargs)\n
 self._add_stimulus_internal(timeseries)\n
 use_sweep_table = popargs('use_sweep_table', kwargs)\n
 if use_sweep_table:\n
 if self.sweep_table is None:\n
 warnings.warn(\"Use of SweepTable is deprecated. Use the IntracellularRecordingsTable, \"\n \"SimultaneousRecordingsTable tables instead. See the add_intracellular_recording, \"\n \"add_icephys_simultaneous_recording, add_icephys_sequential_recording, \"\n \"add_icephys_repetition, add_icephys_condition functions.\",\n DeprecationWarning)\n
 self._update_sweep_table(timeseries)\n\n
 @docval(*get_docval(NWBFile.add_stimulus),\n {'name': 'use_sweep_table', 'type': bool, 'default': False, 'doc': 'Use the deprecated SweepTable'})\n
 def add_stimulus_template(self, **kwargs):\n
 \"\"\"\n Overwrite behavior from NWBFile to avoid use of the deprecated SweepTable\n \"\"\"\n
 timeseries = popargs('timeseries', kwargs)\n
 self._add_stimulus_template_internal(timeseries)\n
 use_sweep_table = popargs('use_sweep_table', kwargs)\n
 if use_sweep_table:\n
 if self.sweep_table is None:\n
 warnings.warn(\"Use of SweepTable is deprecated. Use the IntracellularRecordingsTable, \"\n \"SimultaneousRecordingsTable tables instead. See the add_intracellular_recording, \"\n \"add_icephys_simultaneous_recording, add_icephys_sequential_recording, \"\n \"add_icephys_repetition, add_icephys_condition functions.\",\n DeprecationWarning)\n
 self._update_sweep_table(timeseries)\n\n
 @docval(*get_docval(NWBFile.add_acquisition),\n {'name': 'use_sweep_table', 'type': bool, 'default': False, 'doc': 'Use the deprecated SweepTable'})\n
 def add_acquisition(self, **kwargs):\n
 \"\"\"\n Overwrite behavior from NWBFile to avoid use of the deprecated SweepTable\n \"\"\"\n
 nwbdata = popargs('nwbdata', kwargs)\n
 self._add_acquisition_internal(nwbdata)\n
 use_sweep_table = popargs('use_sweep_table', kwargs)\n
 if use_sweep_table:\n
 if self.sweep_table is None:\n
 warnings.warn(\"Use of SweepTable is deprecated. Use the IntracellularRecordingsTable, \"\n \"SimultaneousRecordingsTable tables instead. See the add_intracellular_recording, \"\n \"add_icephys_simultaneous_recording, add_icephys_sequential_recording, \"\n \"add_icephys_repetition, add_icephys_condition functions.\",\n DeprecationWarning)\n
 self._update_sweep_table(nwbdata)\n\n
 @docval(returns='The NWBFile.intracellular_recordings table', rtype=IntracellularRecordingsTable)\n
 def get_intracellular_recordings(self):\n
 \"\"\"\n Get the NWBFile.intracellular_recordings table.\n\n In contrast to NWBFile.intracellular_recordings, this function will create the\n IntracellularRecordingsTable table if not yet done, whereas NWBFile.intracellular_recordings\n will return None if the table is currently not being used.\n \"\"\"\n
 if self.intracellular_recordings is None:\n
 self.intracellular_recordings = IntracellularRecordingsTable()\n
 return self.intracellular_recordings\n\n
 @docval(*get_docval(IntracellularRecordingsTable.add_recording),\n returns='Integer index of the row that was added to IntracellularRecordingsTable',\n rtype=int,\n allow_extra=True)\n
 def add_intracellular_recording(self, **kwargs):\n
 \"\"\"\n Add an intracellular recording to the intracellular_recordings table. If the\n electrode, stimulus, and/or response do not exist yet in the NWBFile, then\n they will be added to this NWBFile before adding them to the table.\n \"\"\"\n
 # Add the stimulus, response, and electrode to the file if they don't exist yet\n
 stimulus, response, electrode = getargs('stimulus', 'response', 'electrode', kwargs)\n
 if (stimulus is not None and\n (stimulus.name not in self.stimulus and\n stimulus.name not in self.stimulus_template)):\n
 self.add_stimulus(stimulus, use_sweep_table=False)\n
 if response is not None and response.name not in self.acquisition:\n
 self.add_acquisition(response, use_sweep_table=False)\n
 if electrode is not None and electrode.name not in self.icephys_electrodes:\n
 self.add_icephys_electrode(electrode)\n
 # make sure the intracellular recordings table exists and if not create it using get_intracellular_recordings\n
 # Add the recording to the intracellular_recordings table\n
 return call_docval_func(self.get_intracellular_recordings().add_recording, kwargs)\n\n
 @docval(returns='The NWBFile.icephys_simultaneous_recordings table', rtype=SimultaneousRecordingsTable)\n
 def get_icephys_simultaneous_recordings(self):\n
 \"\"\"\n Get the NWBFile.icephys_simultaneous_recordings table.\n\n In contrast to NWBFile.icephys_simultaneous_recordings, this function will create the\n SimultaneousRecordingsTable table if not yet done, whereas NWBFile.icephys_simultaneous_recordings\n will return None if the table is currently not being used.\n \"\"\"\n
 if self.icephys_simultaneous_recordings is None:\n
 self.icephys_simultaneous_recordings = SimultaneousRecordingsTable(self.get_intracellular_recordings())\n
 return self.icephys_simultaneous_recordings\n\n
 @docval(*get_docval(SimultaneousRecordingsTable.add_simultaneous_recording),\n returns='Integer index of the row that was added to SimultaneousRecordingsTable',\n rtype=int,\n allow_extra=True)\n
 def add_icephys_simultaneous_recording(self, **kwargs):\n
 \"\"\"\n Add a new simultaneous recording to the icephys_simultaneous_recordings table\n \"\"\"\n
 return call_docval_func(self.get_icephys_simultaneous_recordings().add_simultaneous_recording, kwargs)\n\n
 @docval(returns='The NWBFile.icephys_sequential_recordings table', rtype=SequentialRecordingsTable)\n
 def get_icephys_sequential_recordings(self):\n
 \"\"\"\n Get the NWBFile.icephys_sequential_recordings table.\n\n In contrast to NWBFile.icephys_sequential_recordings, this function will create the\n SequentialRecordingsTable table if not yet done, whereas NWBFile.icephys_sequential_recordings\n will return None if the table is currently not being used.\n \"\"\"\n
 if self.icephys_sequential_recordings is None:\n
 self.icephys_sequential_recordings = SequentialRecordingsTable(self.get_icephys_simultaneous_recordings())\n
 return self.icephys_sequential_recordings\n\n
 @docval(*get_docval(SequentialRecordingsTable.add_sequential_recording),\n returns='Integer index of the row that was added to SequentialRecordingsTable',\n rtype=int,\n allow_extra=True)\n
 def add_icephys_sequential_recording(self, **kwargs):\n
 \"\"\"\n Add a new sequential recording to the icephys_sequential_recordings table\n \"\"\"\n
 self.get_icephys_sequential_recordings()\n
 return call_docval_func(self.icephys_sequential_recordings.add_sequential_recording, kwargs)\n\n
 @docval(returns='The NWBFile.icephys_repetitions table', rtype=RepetitionsTable)\n
 def get_icephys_repetitions(self):\n
 \"\"\"\n Get the NWBFile.icephys_repetitions table.\n\n In contrast to NWBFile.icephys_repetitions, this function will create the\n RepetitionsTable table if not yet done, whereas NWBFile.icephys_repetitions\n will return None if the table is currently not being used.\n \"\"\"\n
 if self.icephys_repetitions is None:\n
 self.icephys_repetitions = RepetitionsTable(self.get_icephys_sequential_recordings())\n
 return self.icephys_repetitions\n\n
 @docval(*get_docval(RepetitionsTable.add_repetition),\n returns='Integer index of the row that was added to RepetitionsTable',\n rtype=int,\n allow_extra=True)\n
 def add_icephys_repetition(self, **kwargs):\n
 \"\"\"\n Add a new repetition to the RepetitionsTable table\n \"\"\"\n
 return call_docval_func(self.get_icephys_repetitions().add_repetition, kwargs)\n\n
 @docval(returns='The NWBFile.icephys_experimental_conditions table', rtype=ExperimentalConditionsTable)\n
 def get_icephys_experimental_conditions(self):\n
 \"\"\"\n Get the NWBFile.icephys_experimental_conditions table.\n\n In contrast to NWBFile.icephys_experimental_conditions, this function will create the\n ExperimentalConditionsTable table if not yet done, whereas NWBFile.icephys_experimental_conditions\n will return None if the table is currently not being used.\n \"\"\"\n
 if self.icephys_experimental_conditions is None:\n
 self.icephys_experimental_conditions = ExperimentalConditionsTable(self.get_icephys_repetitions())\n
 return self.icephys_experimental_conditions\n\n
 @docval(*get_docval(ExperimentalConditionsTable.add_experimental_condition),\n returns='Integer index of the row that was added to ExperimentalConditionsTable',\n rtype=int,\n allow_extra=True)\n
 def add_icephys_experimental_condition(self, **kwargs):\n
 \"\"\"\n Add a new condition to the ExperimentalConditionsTable table\n \"\"\"\n
 return call_docval_func(self.get_icephys_experimental_conditions().add_experimental_condition, kwargs)\n\n
 def get_icephys_meta_parent_table(self):\n
 \"\"\"\n Get the top-most table in the intracellular ephys metadata table hierarchy that exists in this NWBFile.\n\n The intracellular ephys metadata consists of a hierarchy of DynamicTables, i.e.,\n experimental_conditions --> repetitions --> sequential_recordings -->\n simultaneous_recordings --> intracellular_recordings etc.\n In a given NWBFile not all tables may exist. This convenience function returns the top-most\n table that exists in this file. E.g., if the file contains only the simultaneous_recordings\n and intracellular_recordings tables then the function would return the simultaneous_recordings table.\n Similarly, if the file contains all tables then it will return the experimental_conditions table.\n\n :returns: DynamicTable object or None\n \"\"\"\n
 if self.icephys_experimental_conditions is not None:\n
 return self.icephys_experimental_conditions\n
 elif self.icephys_repetitions is not None:\n
 return self.icephys_repetitions\n
 elif self.icephys_sequential_recordings is not None:\n
 return self.icephys_sequential_recordings\n
 elif self.icephys_simultaneous_recordings is not None:\n
 return self.icephys_simultaneous_recordings\n
 elif self.intracellular_recordings is not None:\n
 return self.intracellular_recordings\n
 else:\n
 return None\n"
] | [
[
"pandas.concat",
"numpy.array",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
XINZHANG-ops/OwnUtilities | [
"50a4be781706082e2f50705c16be3fc54a6d9e06"
] | [
"model_trainingtime_prediction/model_level_utils.py"
] | [
"\"\"\"\n****************************************\n * @author: Xin Zhang\n * Date: 5/22/21\n****************************************\n\"\"\"\nimport time\nimport tensorflow.keras as keras\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom random import sample\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder\nimport copy\n\nactivation_fcts = [\n 'relu', \"sigmoid\", \"softmax\", \"softplus\", \"softsign\", \"tanh\", \"selu\", \"elu\", \"exponential\"\n]\noptimizers = [\"sgd\", \"rmsprop\", \"adam\", \"adadelta\", \"adagrad\", \"adamax\", \"nadam\", \"ftrl\"]\nlosses = [\"mae\", \"mape\", \"mse\", \"msle\", \"poisson\", \"categorical_crossentropy\"]\n\n\nclass TimeHistory(keras.callbacks.Callback):\n def on_train_begin(self, logs={}):\n self.train_start_time = time.time()\n self.epoch_times = []\n self.batch_times = []\n self.epoch_times_detail = []\n self.batch_times_detail = []\n\n def on_train_end(self, logs={}):\n self.train_end_time = time.time()\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_time_start = time.time()\n\n def on_epoch_end(self, epoch, logs={}):\n epoch_time_end = time.time()\n self.epoch_times.append(epoch_time_end - self.epoch_time_start)\n self.epoch_times_detail.append((self.epoch_time_start, epoch_time_end))\n\n def on_train_batch_begin(self, batch, logs={}):\n self.bacth_time_start = time.time()\n\n def on_train_batch_end(self, batch, logs={}):\n batch_time_end = time.time()\n self.batch_times.append(batch_time_end - self.bacth_time_start)\n self.batch_times_detail.append((self.bacth_time_start, batch_time_end))\n\n def relative_by_train_start(self):\n self.epoch_times_detail = np.array(self.epoch_times_detail) - self.train_start_time\n self.batch_times_detail = np.array(self.batch_times_detail) - self.train_start_time\n self.train_end_time = np.array(self.train_end_time) - self.train_start_time\n\n\nclass gen_nn:\n def __init__(\n self,\n hidden_layers_num_lower=5,\n hidden_layers_num_upper=101,\n hidden_layer_size_lower=1,\n hidden_layer_size_upper=1001,\n activation='random',\n optimizer='random',\n loss='random'\n ):\n self.hidden_layers_num_lower = hidden_layers_num_lower\n self.hidden_layers_num_upper = hidden_layers_num_upper\n self.hidden_layer_size_lower = hidden_layer_size_lower\n self.hidden_layer_size_upper = hidden_layer_size_upper\n self.activation_pick = activation\n self.optimizer_pick = optimizer\n self.loss_pick = loss\n self.activation_fcts = activation_fcts\n self.optimizers = optimizers\n self.losses = losses\n\n @staticmethod\n def nothing(x):\n return x\n\n @staticmethod\n def build_dense_model(layer_sizes, activations, optimizer, loss):\n model_dense = Sequential()\n for index, size in enumerate(layer_sizes):\n model_dense.add(Dense(size, activation=activations[index]))\n model_dense.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n return model_dense\n\n @staticmethod\n def get_dense_model_features(keras_model):\n layers = [\n layer_info for layer_info in keras_model.get_config()['layers']\n if layer_info['class_name'] == 'Dense'\n ]\n layer_sizes = [l['config']['units'] for l in layers]\n acts = [l['config']['activation'].lower() for l in layers]\n return layer_sizes, acts\n\n def generate_model(self):\n hidden_layers_num = np.random.randint(\n self.hidden_layers_num_lower, self.hidden_layers_num_upper\n )\n hidden_layer_sizes = np.random.randint(\n 
\n\nclass gen_nn:\n
 def __init__(\n self,\n hidden_layers_num_lower=5,\n hidden_layers_num_upper=101,\n hidden_layer_size_lower=1,\n hidden_layer_size_upper=1001,\n activation='random',\n optimizer='random',\n loss='random'\n ):\n
 self.hidden_layers_num_lower = hidden_layers_num_lower\n
 self.hidden_layers_num_upper = hidden_layers_num_upper\n
 self.hidden_layer_size_lower = hidden_layer_size_lower\n
 self.hidden_layer_size_upper = hidden_layer_size_upper\n
 self.activation_pick = activation\n
 self.optimizer_pick = optimizer\n
 self.loss_pick = loss\n
 self.activation_fcts = activation_fcts\n
 self.optimizers = optimizers\n
 self.losses = losses\n\n
 @staticmethod\n def nothing(x):\n return x\n\n
 @staticmethod\n def build_dense_model(layer_sizes, activations, optimizer, loss):\n
 model_dense = Sequential()\n
 for index, size in enumerate(layer_sizes):\n
 model_dense.add(Dense(size, activation=activations[index]))\n
 model_dense.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\n
 return model_dense\n\n
 @staticmethod\n def get_dense_model_features(keras_model):\n
 layers = [\n layer_info for layer_info in keras_model.get_config()['layers']\n if layer_info['class_name'] == 'Dense'\n ]\n
 layer_sizes = [l['config']['units'] for l in layers]\n
 acts = [l['config']['activation'].lower() for l in layers]\n
 return layer_sizes, acts\n\n
 def generate_model(self):\n
 hidden_layers_num = np.random.randint(\n self.hidden_layers_num_lower, self.hidden_layers_num_upper\n )\n
 hidden_layer_sizes = np.random.randint(\n self.hidden_layer_size_lower, self.hidden_layer_size_upper, hidden_layers_num\n )\n\n
 if self.activation_pick == 'random':\n
 activations = np.random.choice(self.activation_fcts, hidden_layers_num)\n
 else:\n
 activations = np.random.choice([self.activation_pick], hidden_layers_num)\n
 if self.optimizer_pick == 'random':\n
 optimizer = np.random.choice(self.optimizers)\n
 else:\n
 optimizer = self.optimizer_pick\n
 if self.loss_pick == 'random':\n
 loss = np.random.choice(self.losses)\n
 else:\n
 loss = self.loss_pick\n\n
 return {\n 'model': gen_nn.build_dense_model(hidden_layer_sizes, activations, optimizer, loss),\n 'layer_sizes': [int(i) for i in hidden_layer_sizes],\n 'activations': list(activations),\n 'optimizer': optimizer,\n 'loss': loss\n }\n\n
 def generate_model_configs(self, num_model_data=1000, progress=True):\n
 model_configs = []\n
 if progress:\n
 loop_fun = tqdm\n
 else:\n
 loop_fun = gen_nn.nothing\n
 for i in loop_fun(range(num_model_data)):\n
 data = self.generate_model()\n
 del data['model']\n
 model_configs.append(data)\n
 return model_configs\n\n\n
class model_train_data:\n
 def __init__(\n self,\n model_configs,\n input_dims=None,\n batch_sizes=None,\n epochs=None,\n truncate_from=None,\n trials=None,\n batch_strategy='random',\n input_dim_strategy='same'\n ):\n
 \"\"\"\n\n @param model_configs:\n @param input_dims: input data number of features\n @param batch_sizes:\n @param epochs:\n @param truncate_from:\n @param trials:\n @param input_dim_strategy: 'random' or 'same', same will be same size as first layer size\n \"\"\"\n
 self.model_configs = []\n
 for info_dict in model_configs:\n
 d2 = copy.deepcopy(info_dict)\n
 self.model_configs.append(d2)\n
 self.input_dims = input_dims if input_dims is not None else list(range(1, 1001))\n
 self.batch_sizes = batch_sizes if batch_sizes is not None else [2**i for i in range(1, 9)]\n
 self.epochs = epochs if epochs is not None else 10\n
 self.truncate_from = truncate_from if truncate_from is not None else 2\n
 self.trials = trials if trials is not None else 5\n
 self.batch_strategy = batch_strategy\n
 self.input_dim_strategy = input_dim_strategy\n
 self.activation_fcts = activation_fcts\n
 self.optimizers = optimizers\n
 self.losses = losses\n
 self.act_mapping = dict((act, index + 1) for index, act in enumerate(self.activation_fcts))\n
 self.opt_mapping = dict((opt, index + 1) for index, opt in enumerate(self.optimizers))\n
 self.loss_mapping = dict((loss, index + 1) for index, loss in enumerate(self.losses))\n\n
 def get_train_data(self, progress=True):\n
 model_data = []\n
 model_configs = []\n
 if progress:\n
 loop_fun = tqdm\n
 else:\n
 loop_fun = gen_nn.nothing\n
 for info_dict in self.model_configs:\n
 d2 = copy.deepcopy(info_dict)\n
 model_configs.append(d2)\n
 for model_config in loop_fun(model_configs):\n
 model = gen_nn.build_dense_model(\n layer_sizes=model_config['layer_sizes'],\n activations=model_config['activations'],\n optimizer=model_config['optimizer'],\n loss=model_config['loss']\n )\n
 if self.batch_strategy == 'all':\n
 batch_sizes = self.batch_sizes.copy()\n
 else:\n
 batch_sizes = sample(self.batch_sizes, 1)\n
 input_dim = sample(self.input_dims, 1)[0]\n
 for batch_size in batch_sizes:\n
 batch_size_data_batch = []\n
 batch_size_data_epoch = []\n
 if self.input_dim_strategy == 'same':\n
 try:\n
 input_shape = model.get_config()['layers'][0]['config']['units']\n
 except Exception:\n
 input_shape = model.get_config(\n )['layers'][0]['config']['batch_input_shape'][1]\n
 else:\n
 input_shape = input_dim\n
 out_shape = model.get_config()['layers'][-1]['config']['units']\n
 x = np.ones((batch_size, input_shape), dtype=np.float32)\n
 y = np.ones((batch_size, out_shape), dtype=np.float32)\n
 for _ in range(self.trials):\n
 time_callback = TimeHistory()\n
 model.fit(\n x,\n y,\n epochs=self.epochs,\n batch_size=batch_size,\n callbacks=[time_callback],\n verbose=False\n )\n
 times_batch = np.array(time_callback.batch_times) * 1000\n
 times_epoch = np.array(time_callback.epoch_times) * 1000\n
 batch_size_data_batch.extend(times_batch)\n
 batch_size_data_epoch.extend(times_epoch)\n\n
 batch_times_truncated = batch_size_data_batch[self.truncate_from:]\n
 epoch_times_truncated = batch_size_data_epoch[self.truncate_from:]\n
 recovered_time = [\n np.median(batch_times_truncated)\n ] * self.truncate_from + batch_times_truncated\n\n
 model_config[f'batch_size_{batch_size}'] = {\n 'batch_time': np.median(batch_times_truncated),\n 'epoch_time': np.median(epoch_times_truncated),\n 'setup_time': np.sum(batch_size_data_batch) - sum(recovered_time),\n 'input_dim': input_dim\n }\n
 model_data.append(model_config)\n
 return model_data\n\n
 def convert_config_data(\n self,\n model_data,\n layer_num_upper,\n layer_na_fill=0,\n act_na_fill=0,\n opt_dummy=True,\n loss_dummy=True,\n min_max_scaler=True\n ):\n
 data_rows = []\n
 time_rows = []\n\n
 for model_i_data in model_data:\n
 layer_sizes = model_i_data['layer_sizes'] + [layer_na_fill] * layer_num_upper\n
 layer_sizes = layer_sizes[:layer_num_upper]\n
 activations = [self.act_mapping[i]\n for i in model_i_data['activations']] + [act_na_fill] * layer_num_upper\n
 activations = activations[:layer_num_upper]\n
 if opt_dummy:\n
 optimizer = model_i_data['optimizer']\n
 else:\n
 optimizer = self.opt_mapping[model_i_data['optimizer']]\n
 if loss_dummy:\n
 loss = model_i_data['loss']\n
 else:\n
 loss = self.loss_mapping[model_i_data['loss']]\n
 batch_names = [k for k in model_i_data.keys() if k.startswith('batch_size')]\n\n
 for batch_name in batch_names:\n
 batch_value = int(batch_name.split('_')[-1])\n
 batch_time = model_i_data[batch_name]['batch_time']\n
 epoch_time = model_i_data[batch_name]['epoch_time']\n
 setup_time = model_i_data[batch_name]['setup_time']\n
 input_dim = model_i_data[batch_name]['input_dim']\n
 data_rows.append(\n layer_sizes + activations + [optimizer, loss, batch_value, input_dim]\n )\n
 time_rows.append([batch_time, epoch_time, setup_time])\n\n
 layer_names = [f'layer_{i + 1}_size' for i in range(layer_num_upper)]\n
 act_names = [f'layer_{i + 1}_activation' for i in range(layer_num_upper)]\n
 temp_df = pd.DataFrame(\n data_rows,\n columns=layer_names + act_names + ['optimizer', 'loss', 'batch_size', 'input_dim']\n )\n
 if opt_dummy:\n
 first_row = dict(temp_df.iloc[0])\n
 for opt in self.optimizers:\n
 first_row['optimizer'] = opt\n
 temp_df = temp_df.append(first_row, ignore_index=True)\n
 temp_df = pd.get_dummies(temp_df, columns=['optimizer'])\n
 temp_df = temp_df.drop(temp_df.index.tolist()[-len(self.optimizers):])\n
 if loss_dummy:\n
 first_row = dict(temp_df.iloc[0])\n
 for los in self.losses:\n
 first_row['loss'] = los\n
 temp_df = temp_df.append(first_row, ignore_index=True)\n
 temp_df = pd.get_dummies(temp_df, columns=['loss'])\n
 temp_df = temp_df.drop(temp_df.index.tolist()[-len(self.losses):])\n
 time_df = pd.DataFrame(time_rows, columns=['batch_time', 'epoch_time', 'setup_time'])\n
 if min_max_scaler:\n
 scaler = MinMaxScaler()\n
 scaled_data = scaler.fit_transform(temp_df.to_numpy())\n
 temp_df = pd.DataFrame(scaled_data, columns=temp_df.columns)\n
 return pd.concat([temp_df, time_df], axis=1), scaler\n
 else:\n
 return pd.concat([temp_df, time_df], axis=1), 
None\n\n def convert_model_data(\n self,\n keras_model,\n layer_num_upper,\n optimizer,\n loss,\n batch_size,\n input_dim=None,\n layer_na_fill=0,\n act_na_fill=0,\n scaler=None,\n opt_dummy=True,\n loss_dummy=True,\n ):\n layer_sizes, acts = gen_nn.get_dense_model_features(keras_model)\n if input_dim is None:\n input_dim = layer_sizes[0]\n layer_sizes = layer_sizes + [layer_na_fill] * layer_num_upper\n layer_sizes = layer_sizes[:layer_num_upper]\n acts = [self.act_mapping[i] for i in acts]\n acts = acts + [act_na_fill] * layer_num_upper\n acts = acts[:layer_num_upper]\n if opt_dummy:\n optimizer = optimizer.lower()\n else:\n optimizer = self.opt_mapping[optimizer.lower()]\n if loss_dummy:\n loss = loss.lower()\n else:\n loss = self.loss_mapping[loss.lower()]\n data = layer_sizes + acts + [optimizer, loss, batch_size, input_dim]\n layer_names = [f'layer_{i + 1}_size' for i in range(layer_num_upper)]\n act_names = [f'layer_{i + 1}_activation' for i in range(layer_num_upper)]\n temp_df = pd.DataFrame([data],\n columns=layer_names + act_names +\n ['optimizer', 'loss', 'batch_size', 'input_dim'])\n if opt_dummy:\n first_row = dict(temp_df.iloc[0])\n for opt in self.optimizers:\n first_row['optimizer'] = opt\n temp_df = temp_df.append(first_row, ignore_index=True)\n temp_df = pd.get_dummies(temp_df, columns=['optimizer'])\n temp_df = temp_df.drop(temp_df.index.tolist()[-len(self.optimizers):])\n if loss_dummy:\n first_row = dict(temp_df.iloc[0])\n for los in self.losses:\n first_row['loss'] = los\n temp_df = temp_df.append(first_row, ignore_index=True)\n temp_df = pd.get_dummies(temp_df, columns=['loss'])\n temp_df = temp_df.drop(temp_df.index.tolist()[-len(self.losses):])\n\n if scaler is None:\n return temp_df\n else:\n scaled_data = scaler.transform(temp_df.to_numpy())\n return pd.DataFrame(scaled_data, columns=temp_df.columns)\n\n\nclass convert_dense_data:\n def __init__(self):\n self.optimizers = optimizers\n\n unique_all_optimizers = sorted(list(set(self.optimizers)))\n enc = OneHotEncoder(handle_unknown='ignore')\n x_opts = [[i] for i in unique_all_optimizers]\n enc.fit(x_opts)\n self.enc = enc\n\n @staticmethod\n def dense_layer_flops(i, o):\n return (2 * i - 1) * o\n\n @staticmethod\n def get_flops_dense(input_shape, dense_model_obj, sum_all=True):\n dense_flops = []\n for idx, layer_data in enumerate(dense_model_obj.get_config()['layers']):\n layer_name = layer_data['class_name']\n layer_config = layer_data['config']\n if layer_name == 'Dense':\n flops = convert_dense_data.dense_layer_flops(input_shape, layer_config['units'])\n input_shape = layer_config['units']\n dense_flops.append(flops)\n if sum_all:\n return sum(dense_flops)\n else:\n return dense_flops\n\n @staticmethod\n def get_units_sum_dense_keras(dense_model_obj):\n return sum([\n layer['config']['units'] for layer in dense_model_obj.get_config()['layers']\n if layer['class_name'] == 'Dense'\n ])\n\n def convert_model_config(self, model_config_dense, data_type='Units', min_max_scaler=True):\n \"\"\"\n\n @param model_config_dense:\n @param data_type: str \"Units\" or \"FLOPs\"\n @param min_max_scaler:\n @return:\n \"\"\"\n all_batch_sizes = []\n all_optimizers = []\n flops_data = []\n units_data = []\n times_data = []\n for index, model_config in enumerate(tqdm(model_config_dense)):\n batch_name = [i for i in model_config.keys() if i.startswith('batch_size')][0]\n input_shape = model_config[batch_name]['input_dim']\n batch_size = int(batch_name.split('_')[-1])\n all_batch_sizes.append(batch_size)\n 
all_optimizers.append(model_config['optimizer'])\n if data_type.lower().startswith('f'):\n model = gen_nn.build_dense_model(\n layer_sizes=model_config['layer_sizes'],\n activations=model_config['activations'],\n optimizer=model_config['optimizer'],\n loss=model_config['loss']\n )\n\n flops = convert_dense_data.get_flops_dense(input_shape, model, sum_all=True)\n flops_data.append(flops)\n units_data.append(sum(model_config['layer_sizes']))\n times_data.append(model_config[batch_name]['batch_time'])\n\n if data_type.lower().startswith('u'):\n layer_data = units_data.copy()\n elif data_type.lower().startswith('f'):\n layer_data = flops_data.copy()\n else:\n layer_data = units_data.copy()\n\n dense_data = []\n for size, batch, opt in tqdm(list(zip(layer_data, all_batch_sizes, all_optimizers))):\n optimizer_onehot = list(self.enc.transform([[opt]]).toarray()[0])\n dense_data.append([size] + [batch] + optimizer_onehot)\n\n if min_max_scaler:\n scaler = MinMaxScaler()\n scaler.fit(dense_data)\n scaler_dense_data = scaler.transform(dense_data)\n return scaler_dense_data, np.array(times_data), scaler\n else:\n return dense_data, np.array(times_data), None\n\n def convert_model_keras(\n self, input_shape, dense_model_obj, optimizer, batch_size, data_type='Unit', scaler=None\n ):\n flops = convert_dense_data.get_flops_dense(input_shape, dense_model_obj, sum_all=True)\n unit_sum = convert_dense_data.get_units_sum_dense_keras(dense_model_obj)\n\n if data_type.lower().startswith('f'):\n layer_data = flops\n elif data_type.lower().startswith('u'):\n layer_data = unit_sum\n else:\n layer_data = unit_sum\n\n optimizer_onehot = list(self.enc.transform([[optimizer]]).toarray()[0])\n layer_data = [layer_data] + [batch_size] + optimizer_onehot\n\n if scaler is not None:\n scaled_data = scaler.transform(np.array([layer_data]))\n return scaled_data\n else:\n return layer_data\n\n\ndef demo():\n import random\n import matplotlib.pyplot as plt\n # whole pipeline demo\n\n # generate model configurations as data points\n data_points = 1000\n gnn = gen_nn(\n hidden_layers_num_lower=1,\n hidden_layers_num_upper=51,\n hidden_layer_size_lower=1,\n hidden_layer_size_upper=1001,\n activation='random',\n optimizer='random',\n loss='random'\n )\n model_configs = gnn.generate_model_configs(num_model_data=data_points)\n\n # train generated model configurations to get training time\n mtd = model_train_data(\n model_configs,\n input_dims=list(range(1, 1001)),\n batch_sizes=[2**i for i in range(1, 9)],\n epochs=5,\n truncate_from=1,\n trials=2,\n batch_strategy='random',\n )\n model_data = mtd.get_train_data()\n\n # convert raw data as dataframe and scaler\n df, scaler = mtd.convert_config_data(\n model_data, layer_num_upper=50, layer_na_fill=0, act_na_fill=0, min_max_scaler=True\n )\n\n # use data to train a ML model\n test_ratio = 0.2\n df_index = df.index.tolist()\n np.random.shuffle(df_index)\n\n middle_index = int(df.shape[0] * test_ratio)\n test_idx = df_index[:middle_index]\n train_idx = df_index[middle_index:]\n\n df_train = df.iloc[train_idx]\n df_test = df.iloc[test_idx]\n\n # we need to train 2 models, one to predict batch runtime, one to predict setup time\n # combine both will be the true training time of a model\n feature_cols = df.columns.tolist()[:-3]\n target_col = 'batch_time'\n setup_col = 'setup_time'\n\n x_train = df_train[feature_cols].to_numpy()\n y_batch_train = np.array(df_train[target_col].tolist())\n y_setup_train = np.array(df_train[setup_col].tolist())\n\n x_test = df_test[feature_cols].to_numpy()\n 
y_batch_test = np.array(df_test[target_col].tolist())\n y_setup_test = np.array(df_test[setup_col].tolist())\n\n # build a regular dense model for batch time prediction\n from keras.models import Sequential\n from keras.layers import Dense\n\n batch_model = Sequential()\n batch_model.add(\n Dense(200, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu')\n )\n batch_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n batch_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n batch_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n batch_model.add(Dense(1, kernel_initializer='normal'))\n # Compile model\n batch_model.compile(loss='mean_squared_error', optimizer='adam')\n\n history_batch = batch_model.fit(\n x_train,\n y_batch_train,\n batch_size=16,\n epochs=50,\n validation_data=(x_test, y_batch_test),\n verbose=True\n )\n\n # summarize history for loss\n plt.plot(history_batch.history['loss'])\n plt.plot(history_batch.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n # plot predictions vs true for batch model\n batch_y_pred = batch_model.predict(x_test)\n batch_y_pred = batch_y_pred.reshape(batch_y_pred.shape[0], )\n plt.scatter(batch_y_pred, y_batch_test)\n plt.show()\n\n # build a dense model for setup time prediction\n setup_model = Sequential()\n setup_model.add(\n Dense(200, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu')\n )\n setup_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n setup_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n setup_model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n setup_model.add(Dense(1, kernel_initializer='normal'))\n # Compile model\n setup_model.compile(loss='mean_squared_error', optimizer='adam')\n history_setup = setup_model.fit(\n x_train,\n y_setup_train,\n batch_size=16,\n epochs=45,\n validation_data=(x_test, y_setup_test),\n verbose=True\n )\n\n # summarize history for loss\n plt.plot(history_setup.history['loss'])\n plt.plot(history_setup.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n # plot predictions vs true for setup time model\n setup_y_pred = setup_model.predict(x_test)\n setup_y_pred = setup_y_pred.reshape(setup_y_pred.shape[0], )\n plt.scatter(setup_y_pred, y_setup_test)\n plt.show()\n\n # validate on a real case\n val_data_points = 100\n val_genn = gen_nn(\n hidden_layers_num_lower=50,\n hidden_layers_num_upper=51,\n hidden_layer_size_lower=1,\n hidden_layer_size_upper=1001,\n activation='random',\n optimizer='random',\n loss='random'\n )\n val_model_configs = val_genn.generate_model_configs(num_model_data=val_data_points)\n\n # collect all info during training\n real_time_process_first_batchs = []\n real_time_batchs = []\n real_time_epochs = []\n real_time_start_ends = []\n y_val_preds_batch = []\n y_val_preds_setup = []\n batch_sizes_collect = []\n epochs_collect = []\n data_points_collect = []\n\n mtd_val = model_train_data([])\n for m_config in tqdm(val_model_configs):\n # here we consider changeable data size and epoch\n batch_size_val = random.sample(mtd_val.batch_sizes, 1)[0]\n epochs_val = random.sample([2, 3, 4, 5], 1)[0]\n data_dim_val = random.sample(list(range(1, 1001)), 1)[0]\n data_size_val = random.sample([5000, 10000, 15000, 1000], 
1)[0]\n data_points_collect.append(data_size_val)\n batch_sizes_collect.append(batch_size_val)\n epochs_collect.append(epochs_val)\n\n model_val = gen_nn.build_dense_model(\n layer_sizes=m_config['layer_sizes'],\n activations=m_config['activations'],\n optimizer=m_config['optimizer'],\n loss=m_config['loss']\n )\n\n out_shape = model_val.get_config()['layers'][-1]['config']['units']\n x = np.ones((data_size_val, data_dim_val), dtype=np.float32)\n y = np.ones((data_size_val, out_shape), dtype=np.float32)\n\n time_callback = TimeHistory()\n model_val.fit(\n x,\n y,\n epochs=epochs_val,\n batch_size=batch_size_val,\n callbacks=[time_callback],\n verbose=False\n )\n\n batch_median = np.median(time_callback.batch_times[2:])\n # remove first batch to remove the effect of setup, and compensate with median batch time\n real_time_process_first_batchs.append(\n sum([batch_median] + time_callback.batch_times[1:]) * 1000\n )\n real_time_batchs.append(sum(time_callback.batch_times) * 1000)\n real_time_epochs.append(sum(time_callback.epoch_times) * 1000)\n real_time_start_ends.append(\n (time_callback.train_end_time - time_callback.train_start_time) * 1000\n )\n\n train_batch_numbers = math.ceil(data_size_val / batch_size_val) * epochs_val\n\n x_val = mtd_val.convert_model_data(\n model_val,\n 50,\n m_config['optimizer'],\n m_config['loss'],\n batch_size_val,\n data_dim_val,\n layer_na_fill=0,\n act_na_fill=0,\n scaler=scaler\n ).to_numpy()\n y_val_pred_batch = batch_model.predict(x_val)\n y_val_pred_batch = y_val_pred_batch.reshape(y_val_pred_batch.shape[0], )[0]\n y_val_preds_batch.append(y_val_pred_batch * train_batch_numbers)\n\n y_val_pred_setup = setup_model.predict(x_val)\n y_val_pred_setup = y_val_pred_setup.reshape(y_val_pred_setup.shape[0], )[0]\n y_val_preds_setup.append(y_val_pred_setup)\n\n # define a function to calculate error\n def cal_score(pred, real, absolute=False):\n pred = np.array(pred).copy()\n real = np.array(real).copy()\n if absolute:\n return abs((pred - real) / real)\n else:\n return (pred - real) / real\n\n # x-axis\n x = range(len(y_val_preds_batch))\n\n # only use prediction from batch model and see error for no setup time\n plt.scatter(x, cal_score(y_val_preds_batch, real_time_process_first_batchs))\n plt.plot(x, [0.15] * len(x), c='r', linewidth=10)\n plt.plot(x, [-0.15] * len(x), c='r', linewidth=10)\n plt.title('trucated batch time error')\n plt.show()\n\n # see error of setup time model\n plt.scatter(\n x,\n cal_score(\n y_val_preds_setup,\n np.array(real_time_batchs) - np.array(real_time_process_first_batchs)\n )\n )\n plt.plot(x, [0.15] * len(x), c='r', linewidth=10)\n plt.plot(x, [-0.15] * len(x), c='r', linewidth=10)\n plt.title('setup time error')\n plt.show()\n\n # see error for true model time prediction, combine results from batch model and setup model\n plt.scatter(\n x,\n cal_score(np.array(y_val_preds_setup) + np.array(y_val_preds_batch), real_time_start_ends)\n )\n plt.plot(x, [0.15] * len(x), c='r', linewidth=10)\n plt.plot(x, [-0.15] * len(x), c='r', linewidth=10)\n plt.title('real batch time error, added pred setup time')\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"sklearn.preprocessing.MinMaxScaler",
"numpy.random.randint",
"pandas.concat",
"matplotlib.pyplot.title",
"numpy.random.choice",
"numpy.median",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"sklearn.preprocessing.OneHotEncoder",
"numpy.random.shuffle",
"numpy.ones",
"matplotlib.pyplot.xlabel",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
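The setup-time bookkeeping in `get_train_data` above is easy to miss: the first few batch timings absorb one-off graph/compilation cost, so they are replaced by the median batch time and the difference is booked as `setup_time`. A minimal self-contained sketch of that logic on dummy timings (as an aside, the `demo()` function above calls `math.ceil` without importing `math`, so running it as written needs an extra `import math`):

```python
import numpy as np

# Dummy per-batch timings in ms: the first two batches are inflated by
# one-off setup cost, the steady state is ~10 ms per batch.
batch_times = [55.0, 30.0, 10.1, 9.9, 10.0, 10.2, 9.8]
truncate_from = 2

truncated = batch_times[truncate_from:]                 # steady-state samples only
recovered = [np.median(truncated)] * truncate_from + truncated
setup_time = np.sum(batch_times) - sum(recovered)       # one-off overhead

print(np.median(truncated))   # ~10 ms steady-state batch time
print(setup_time)             # ~65 ms attributed to setup
```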
Kursula/Color_palette | [
"4eae43845caae3a5a43337b38097ff4f54787d42"
] | [
"som.py"
] | [
"\"\"\"\n2-D Self-Organizing Map with Gaussian Neighbourhood function\nand linearly decreasing learning rate.\n\nThis is mostly using code from SACHIN JOGLEKAR'S blog:\nhttps://codesachin.wordpress.com/2015/11/28/self-organizing-maps-with-googles-tensorflow/\n\nThe code has been ported to Tensorflow v 1.6.0 by Mikko Kursula. \n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n \nclass SOM(object):\n \n #To check if the SOM has been trained\n _trained = False\n \n def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):\n \"\"\"\n Initializes all necessary components of the TensorFlow\n Graph.\n \n m X n are the dimensions of the SOM. 'n_iterations' should\n should be an integer denoting the number of iterations undergone\n while training.\n 'dim' is the dimensionality of the training inputs.\n 'alpha' is a number denoting the initial time(iteration no)-based\n learning rate. Default value is 0.3\n 'sigma' is the the initial neighbourhood value, denoting\n the radius of influence of the BMU while training. By default, its\n taken to be half of max(m, n).\n \"\"\"\n \n #Assign required variables first\n self._m = m\n self._n = n\n if alpha is None:\n alpha = 0.3\n else:\n alpha = float(alpha)\n if sigma is None:\n sigma = max(m, n) / 2.0\n else:\n sigma = float(sigma)\n self._n_iterations = abs(int(n_iterations))\n \n ##INITIALIZE GRAPH\n self._graph = tf.Graph()\n \n ##POPULATE GRAPH WITH NECESSARY COMPONENTS\n with self._graph.as_default():\n \n ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE\n \n #Randomly initialized weightage vectors for all neurons,\n #stored together as a matrix Variable of size [m*n, dim]\n self._weightage_vects = tf.Variable(tf.random_normal(\n [m*n, dim]))\n \n #Matrix of size [m*n, 2] for SOM grid locations\n #of neurons\n self._location_vects = tf.constant(np.array(\n list(self._neuron_locations(m, n))))\n \n ##PLACEHOLDERS FOR TRAINING INPUTS\n #We need to assign them as attributes to self, since they\n #will be fed in during training\n \n #The training vector\n self._vect_input = tf.placeholder(\"float\", [dim])\n #Iteration number\n self._iter_input = tf.placeholder(\"float\")\n \n ##CONSTRUCT TRAINING OP PIECE BY PIECE\n #Only the final, 'root' training op needs to be assigned as\n #an attribute to self, since all the rest will be executed\n #automatically during training\n \n #To compute the Best Matching Unit given a vector\n #Basically calculates the Euclidean distance between every\n #neuron's weightage vector and the input, and returns the\n #index of the neuron which gives the least value\n bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.subtract(self._weightage_vects, tf.stack(\n [self._vect_input for i in range(m*n)])), 2), 1)),\n 0)\n \n #This will extract the location of the BMU based on the BMU's\n #index\n slice_input = tf.pad(tf.reshape(bmu_index, [1]),\n np.array([[0, 1]]))\n bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,\n tf.constant(np.array([1, 2]))),\n [2])\n \n #To compute the alpha and sigma values based on iteration\n #number\n learning_rate_op = tf.subtract(1.0, tf.divide(self._iter_input,\n self._n_iterations))\n _alpha_op = tf.multiply(alpha, learning_rate_op)\n _sigma_op = tf.multiply(sigma, learning_rate_op)\n \n #Construct the op that will generate a vector with learning\n #rates for all neurons, based on iteration number and location\n #wrt BMU.\n bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(\n self._location_vects, tf.stack(\n [bmu_loc for i in range(m*n)])), 2), 1)\n 
neighbourhood_func = tf.exp(tf.negative(tf.divide(tf.cast(\n bmu_distance_squares, \"float32\"), tf.pow(_sigma_op, 2))))\n learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)\n \n #Finally, the op that will use learning_rate_op to update\n #the weightage vectors of all neurons based on a particular\n #input\n learning_rate_multiplier = tf.stack([tf.tile(tf.slice(\n learning_rate_op, np.array([i]), np.array([1])), [dim])\n for i in range(m*n)])\n weightage_delta = tf.multiply(\n learning_rate_multiplier,\n tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),\n self._weightage_vects)) \n new_weightages_op = tf.add(self._weightage_vects,\n weightage_delta)\n self._training_op = tf.assign(self._weightage_vects,\n new_weightages_op) \n \n ##INITIALIZE SESSION\n self._sess = tf.Session()\n \n ##INITIALIZE VARIABLES\n #init_op = tf.initialize_all_variables()\n init_op = tf.global_variables_initializer()\n self._sess.run(init_op)\n \n def _neuron_locations(self, m, n):\n \"\"\"\n Yields one by one the 2-D locations of the individual neurons\n in the SOM.\n \"\"\"\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])\n \n def train(self, input_vects):\n \"\"\"\n Trains the SOM.\n 'input_vects' should be an iterable of 1-D NumPy arrays with\n dimensionality as provided during initialization of this SOM.\n Current weightage vectors for all neurons(initially random) are\n taken as starting conditions for training.\n \"\"\"\n \n #Training iterations\n for iter_no in range(self._n_iterations):\n #Train with each vector one by one\n for input_vect in input_vects:\n self._sess.run(self._training_op,\n feed_dict={self._vect_input: input_vect,\n self._iter_input: iter_no})\n \n #Store a centroid grid for easy retrieval later on\n centroid_grid = [[] for i in range(self._m)]\n self._weightages = list(self._sess.run(self._weightage_vects))\n self._locations = list(self._sess.run(self._location_vects))\n for i, loc in enumerate(self._locations):\n centroid_grid[loc[0]].append(self._weightages[i])\n self._centroid_grid = centroid_grid\n \n self._trained = True\n \n def get_centroids(self):\n \"\"\"\n Returns a list of 'm' lists, with each inner list containing\n the 'n' corresponding centroid locations as 1-D NumPy arrays.\n \"\"\"\n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n return self._centroid_grid\n \n def map_vects(self, input_vects):\n \"\"\"\n Maps each input vector to the relevant neuron in the SOM\n grid.\n 'input_vects' should be an iterable of 1-D NumPy arrays with\n dimensionality as provided during initialization of this SOM.\n Returns a list of 1-D NumPy arrays containing (row, column)\n info for each input vector(in the same order), corresponding\n to mapped neuron.\n \"\"\"\n \n if not self._trained:\n raise ValueError(\"SOM not trained yet\")\n \n to_return = []\n for vect in input_vects:\n min_index = min([i for i in range(len(self._weightages))],\n key=lambda x: np.linalg.norm(vect-\n self._weightages[x]))\n to_return.append(self._locations[min_index])\n \n return to_return\n\n\n "
] | [
[
"tensorflow.Graph",
"tensorflow.multiply",
"tensorflow.pow",
"tensorflow.assign",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.divide",
"numpy.linalg.norm",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.Session",
"numpy.array",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
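A hypothetical usage sketch for the `SOM` class above, assuming the file is saved as `som.py` and a TensorFlow 1.x runtime (consistent with the versions listed): training a small map on random RGB colours, which is the colour-palette use case this repo targets.

```python
import numpy as np
from som import SOM  # assumes the file above is saved as som.py

# 100 random RGB colour vectors in [0, 1] as training data
colors = np.random.rand(100, 3).astype(np.float32)

som = SOM(m=20, n=30, dim=3, n_iterations=100)
som.train(colors)

grid = np.array(som.get_centroids())  # (20, 30, 3): the learned colour palette
bmus = som.map_vects(colors[:5])      # (row, col) of the BMU for each colour
print(grid.shape, bmus[0])
```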
jhonatheberson/artificial-intelligence | [
"8dfe70385bd3721b47a85411be21d9f1846c8ad3"
] | [
"researchMethods/src/fffit/pso.py"
] | [
"\"\"\"Algorithm optimization of particle swarm (PSO).\n\nThe particle optimization algorithm is a metaheuristic, it attempts to optimize\na problem interactively with a swarm of particles percoorendo to a mathematical\nfunction or search space.\n\nthe algorithm has the following steps:\n 1 - Creates a sample space, which is the swarm of particles where it is\n demilitarized by the mathematical function.\n 2 - then it updates all the particles with their positions and volecidades\n thus sweeping the function and obtaining the best result of this\n function.\n\n\"\"\"\nimport multiprocessing as mp\nimport sys\n\nimport numpy as np\n\n# TODO: inherit logger\n#logging.basicConfig(filename='output.log', level=logging.DEBUG,\n# format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')\n\n\nclass Particle(object):\n \"\"\"Creates the particle and updates position and velocity.\"\"\"\n\n def __init__(self, x0, bounds, w=0.5, c=(2,2), sigma=None, vsigma=None):\n \"\"\"Initialize the particle.\n\n Args:\n :param x0(str): Initial value for the sample space to create the\n Gaussian.\n :param bounds(:obj:`list` of :obj:`str`): Limits for the sample\n space to create the Gaussian.\n \"\"\"\n self.pos_best = [] # Best position individual.\n self.fitness_best = None # Error best individual.\n self.curr_fitness = None\n self.w = w\n self.c = c\n bounds = np.array(bounds)\n if sigma is None:\n sigma = np.abs(bounds[1] - bounds[0])\n elif isinstance(sigma, float) or isinstance(sigma, int):\n sigma = np.abs(bounds[1] - bounds[0])/sigma\n self.position = np.random.normal(x0, sigma)\n if vsigma is None:\n vsigma = np.abs(bounds[1] - bounds[0])\n elif isinstance(vsigma, float) or isinstance(vsigma, int):\n vsigma = np.abs(bounds[1] - bounds[0])/vsigma\n self.velocity = np.random.normal(np.zeros(len(x0)), vsigma)\n\n def check_fitness(self):\n \"\"\"Update personal best fitness.\"\"\"\n # Check to see if the current position is an individual best:\n if self.fitness_best is None or self.curr_fitness < self.fitness_best:\n self.pos_best = self.position\n self.fitness_best = self.curr_fitness\n\n def update_velocity(self, pos_best_g):\n \"\"\"Update new particle velocity.\n\n Args:\n :param pos_best_g(str): best overall swarm position.\n\n Returns:\n :return: Void.\n\n \"\"\"\n # TODO Make these adjustable parameters\n r1 = np.random.random(len(self.velocity))\n r2 = np.random.random(len(self.velocity))\n vel_cognitive = self.c[0] * r1 * (self.pos_best - self.position)\n vel_social = self.c[1] * r2 * (pos_best_g - self.position)\n self.velocity = self.w * self.velocity + vel_cognitive + vel_social\n\n def update_position(self, bounds):\n \"\"\"Update the particle position based off new velocity updates.\n\n Args:\n :param bounds(:obj:`list` of :obj:`str`): Limits for the sample\n space to create the Gaussian.\n\n Returns:\n :return: Void.\n \"\"\"\n self.position += self.velocity\n\n # TODO Deal with velocities when particle goes out of bounds\n np.clip(self.position, bounds[0], bounds[1], out=self.position)\n np.clip(self.velocity, bounds[0], bounds[1], out=self.velocity)\n self.velocity[np.isclose(self.position, bounds[0])] *= -1\n self.velocity[np.isclose(self.position, bounds[1])] *= -1\n\n\nclass PSO(object):\n \"\"\"Contains the population and methods for performing steps.\"\"\"\n\n def __getstate__(self):\n \"\"\"Remove unpickable entries from object.\n\n Currently, removes fitness tests as callable functions.\n \"\"\"\n state = self.__dict__.copy()\n del state['tests']\n if 'hooks' in state:\n del 
state['hooks']\n return state\n\n def __setstate__(self, state):\n \"\"\"Recover unpickable items to restore original object.\n\n Currently, calls self.load_tests in order to get callable fitness\n tests and self.load_hooks to get pre_ and _post step hooks.\n \"\"\"\n self.__dict__.update(state)\n if 'testfiles' in self.__dict__:\n # TODO: log\n self.load_tests()\n if 'hookfiles' in self.__dict__:\n # TODO: log\n self.load_hooks(self.hookfiles)\n\n def __init__(self, maxiter=None, goal=1.0, w=0.5, c = (2,2), submit_to_cluster=False):\n \"\"\"Initialize the PSO object.\"\"\"\n self.ncpu = 1\n self.goal = goal\n self.w = w\n self.c = c\n self.submit_to_cluster = submit_to_cluster\n self.fitness = None\n self.step_number = 0\n self.maxiter = maxiter\n self.swarm = None\n if self.submit_to_cluster:\n # TODO: correctly handle the cluster and multitest cases.\n raise NotImplementedError('Cluster submission in review.')\n\n def populate(self, num_particles, x0=None, bounds=None, sigma=None,\n vsigma=None):\n \"\"\"Create the population of particles that is the swarm.\n\n Args:\n :param num_particles(:obj:`int`): Number of particles to be\n created.\n :param initial(): Initial value for the sample space to create the\n Gaussian.\n :param bounds(:obj:`list` of :obj:`str`): Limits for the sample\n space to create the Gaussian.\n\n Returns:\n :return swarm(:obj:`list` of :obj:`Particles`): a list of swarms.\n \"\"\"\n if self.swarm is None:\n self.bounds = bounds\n self.swarm = [Particle(x0, bounds, w=self.w, c=self.c,\n sigma=sigma, vsigma=vsigma) for i in range(num_particles)]\n else:\n raise RuntimeError(\"Tried to populate non-empty swarm\")\n\n def evaluate_single_fitness_test(self, func,\n enum_particles=False, add_step_num=False,\n **kwargs):\n \"\"\"Run the given function as the fitness test for all particles.\n\n Parameters:\n -----------\n fun : callable\n The fitness test function to be minimized:\n\n ``func(particle.position, **kwargs) -> float``.\n\n enum_particles : boolean\n If `True`, the swarm will be enumerated and the particle index will\n be passed to `func` as keyword `part_idx`, added to `kwargs`\n\n add_step_num : boolean\n If `True`, the current step number will be passed to `func`\n as keyword `step_num`, added to `kwargs`\n\n **kwargs: Other keywords to the fitness function, will be passed as is.\n \"\"\"\n if add_step_num:\n kwargs['step_num'] = self.step_number\n if self.ncpu == 1:\n if enum_particles:\n for part_idx, particle in enumerate(self.swarm):\n kwargs['part_idx'] = part_idx\n particle.curr_fitness = func(particle.position, **kwargs)\n else:\n for particle in self.swarm:\n particle.curr_fitness = func(particle.position, **kwargs)\n elif self.ncpu > 1:\n with mp.Pool(processes=self.ncpu) as pool:\n argslist = []\n p = []\n for part_idx, particle in enumerate(self.swarm):\n argslist.append(dict(kwargs))\n # argslist[-1]['x'] = particle.position\n if enum_particles:\n argslist[-1]['part_idx'] = part_idx\n for idx, args in enumerate(argslist):\n p.append(pool.apply_async(func, args=(self.swarm[idx].position,),kwds=args))\n results = [ r.get() for r in p ]\n for part_idx, particle in enumerate(self.swarm):\n particle.curr_fitness = results[part_idx]\n\n def calculate_global_fitness(self):\n \"\"\"Calculate the fitness of the function or sample space.\n\n Returns:\n :return fitness(:obj:`float`): Returns the fitness of the function\n or sample space.\n \"\"\"\n self.swarm_radius = 0\n for particle in self.swarm:\n particle.check_fitness()\n # determine if current 
particle is the best(globally)\n if self.fitness is None or particle.curr_fitness < self.fitness:\n self.pos_best_glob = np.array(particle.position)\n self.fitness = float(particle.curr_fitness)\n # Stop criteria\n for particle in self.swarm:\n dist = np.linalg.norm(particle.position - self.pos_best_glob)\n if dist > self.swarm_radius:\n self.swarm_radius = dist\n return self.fitness # Do we actually need to return something?\n\n def update_swarm(self):\n \"\"\"Update the swarm with new positions and speeds.\n\n Returns:\n :return swarm(:obj:`list` of :obj:`Particles`): returns a list of\n swarms.\n \"\"\"\n if self.fitness is None:\n logging.error(\"Cannot update the swarm before calculating Fitness\")\n raise RuntimeError(\"Updated the swarm before calculating Fitness\")\n # cycle through swarm and update velocities and position\n for particle in self.swarm:\n particle.update_velocity(self.pos_best_glob)\n particle.update_position(self.bounds)\n if self.submit_to_cluster:\n self.curr_iter['update'] += 1\n\n def do_full_step(self, func, **kwargs):\n \"\"\"Perform a full PSO step.\n\n This method goes through all other methods in order to perform a full\n PSO step, so it can be called from a loop in the run() method.\n \"\"\"\n if self.fitness is not None and self.step_number < self.maxiter:\n self.update_swarm()\n if self.submit_to_cluster:\n raise NotImplementedError('Multistep jobs are under revision.')\n else:\n self.evaluate_single_fitness_test(func, **kwargs)\n self.calculate_global_fitness()\n self.step_number += 1\n\n def run(self, func, PSO_DEBUG=None, **kwargs):\n \"\"\"Perform a full optimization run.\n\n Does the optimization with the execution of the update of the speeds\n and coordinates also checks the criterion stopped to find fitnnes.\n\n Parameters\n ----------\n func : callable\n Function that calculates fitnnes.\n\n Returns\n -------\n The dictionary that stores the optimization results.\n \"\"\"\n self.swarm_radius = None\n # TODO make a better radius-based stop criterion.\n while (self.swarm_radius is None or\n self.step_number < self.maxiter and\n self.swarm_radius > 1e-3):\n self.do_full_step(func, **kwargs)\n if PSO_DEBUG is not None:\n with open(PSO_DEBUG, 'a') as dbg_file:\n curr_best = min([p.curr_fitness for p in self.swarm])\n print(f\"# {self.step_number} {curr_best} {self.fitness}\")\n print(f\"\\n\\n# {self.step_number} {curr_best} {self.fitness}\",\n file=dbg_file)\n np.savetxt(dbg_file,\n [(*p.position, p.curr_fitness)\n for p in self.swarm])\n if self.fitness < self.goal:\n break\n self.results = {}\n self.results['best_pos'] = self.pos_best_glob\n self.results['fitness'] = self.fitness\n return self.results\n"
] | [
[
"numpy.abs",
"numpy.clip",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.savetxt",
"numpy.array",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
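A minimal usage sketch for the `PSO` optimizer above, minimizing a sphere function (the import path is an assumption; note also that `update_swarm` calls `logging.error` while the `logging` import is commented out at the top of the file, so that error path would raise a `NameError` as written):

```python
import numpy as np
from fffit.pso import PSO  # assumed module path within this repo

def sphere(x):
    """Fitness to minimise: sum of squares, optimum 0 at the origin."""
    return float(np.sum(x ** 2))

pso = PSO(maxiter=50, goal=1e-6)
pso.populate(num_particles=20, x0=np.zeros(3), bounds=[[-5.0] * 3, [5.0] * 3])
results = pso.run(sphere)
print(results['best_pos'], results['fitness'])
```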
INASIC/npl | [
"d1ee8ca557e7ab5df8a3a724bf41068c36406cd5"
] | [
"npl/bootstrap_gmm.py"
] | [
"\"\"\"function for NPL posterior bootstrap sampler for GMM\n\nParameters\n----------\nB_postsamples : int \n Number of posterior samples to generate\n\nalph_conc : float\n Concentration parameter for DP prior\n\nT_trunc: int > 0 \n Number of prior pseudo-samples from DP base measure for truncated sampling\n\ny: array\n Observed datapoints\n\nN_data: int\n Number of data points\n\nD_data: int\n Dimension of observables\n\nK_clusters: int\n Number of clusters in GMM model\n\nR_restarts: int \n Number of random restarts per posterior bootstrap sample\n\ntol: float\n Stopping criterion for weighted EM\n\nmax_iter: int\n Maximum number of iterations for weighted EM\n\ninit: function\n Returns initial parameters for random restart maximizations \n\nsampleprior: function\n To generate prior pseudo-samples for DP prior\n\npostsamples: array\n Centering posterior samples for MDP-NPL\n \nn_cores: int\n Number of cores Joblib can parallelize over; set to -1 to use all cores\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport copy\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\nfrom npl import maximise_gmm as mgmm\n\n\ndef bootstrap_gmm(B_postsamples,alph_conc,T_trunc,y,N_data,D_data,K_clusters,R_restarts,tol,max_iter,init,sampleprior,postsamples= None,n_cores = -1):\n #Declare parameters\n pi_bb = np.zeros((B_postsamples,K_clusters)) #mixing weights (randomly)\n mu_bb = np.zeros((B_postsamples,K_clusters,D_data)) #means\n sigma_bb = np.zeros((B_postsamples,K_clusters,D_data)) #covariances \n \n\n #Prepare for parallelization\n #concatenate y_tots\n if alph_conc!=0:\n alphas = np.concatenate((np.ones(N_data), (alph_conc/T_trunc)*np.ones(T_trunc)))\n weights = np.random.dirichlet(alphas,B_postsamples) \n y_prior = sampleprior(D_data,T_trunc,K_clusters,B_postsamples, postsamples)\n else:\n weights = np.random.dirichlet(np.ones(N_data), B_postsamples)\n y_prior = np.zeros(B_postsamples)\n\n #Initialize parameters randomly for RR-NPL\n pi_init,mu_init,sigma_init = init(R_restarts, K_clusters,B_postsamples, D_data)\n\n\n #Parallelize bootstrap\n if R_restarts == 0: #FI-NPL (with MLE initialization to select single mode)\n pi_init_mle,mu_init_mle,sigma_init_mle = mgmm.init_params(y,N_data,K_clusters,D_data,tol,max_iter)\n temp = Parallel(n_jobs=n_cores, backend= 'loky')(delayed(mgmm.maximise_mle)(y,weights[i],pi_init_mle,\\\n mu_init_mle,sigma_init_mle,K_clusters,tol,max_iter,N_data) for i in tqdm(range(B_postsamples))) \n else:\n temp = Parallel(n_jobs=n_cores, backend= 'loky')(delayed(mgmm.maximise)(y,y_prior[i],weights[i],\\\n pi_init[i*R_restarts:(i+1)*R_restarts],mu_init[i*R_restarts:(i+1)*R_restarts],sigma_init[i*R_restarts:(i+1)*R_restarts],\\\n alph_conc, T_trunc,K_clusters,tol,max_iter,R_restarts,N_data,D_data, postsamples = postsamples) for i in tqdm(range(B_postsamples)))\n \n\n for i in range(B_postsamples):\n pi_bb[i] = temp[i][0]\n mu_bb[i] = temp[i][1]\n sigma_bb[i]= temp[i][2]\n\n return pi_bb,mu_bb,sigma_bb"
] | [
[
"numpy.zeros",
"numpy.random.dirichlet",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
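The core of the posterior bootstrap above is the weight construction: each of the `B_postsamples` draws Dirichlet weights over the `N_data` observations (concentration 1 each) plus `T_trunc` prior pseudo-samples (concentration `alph_conc/T_trunc` each). A standalone sketch of just that step, lifted directly from the function body:

```python
import numpy as np

N_data, T_trunc, alph_conc, B_postsamples = 100, 10, 1.0, 4

# One Dirichlet parameter per observed point, alph_conc/T_trunc per pseudo-point
alphas = np.concatenate((np.ones(N_data), (alph_conc / T_trunc) * np.ones(T_trunc)))
weights = np.random.dirichlet(alphas, B_postsamples)

print(weights.shape)        # (4, 110): one weight vector per bootstrap sample
print(weights.sum(axis=1))  # each row sums to 1
```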
Sangyeob-Kim/tensorpack_rev | [
"bf4020892edc123a09e08de784da0448464529b2"
] | [
"tensorpack/models/regularize.py"
] | [
"# -*- coding: UTF-8 -*-\n# File: regularize.py\n\n\nimport tensorflow as tf\nimport re\n\nfrom ..utils import logger\nfrom ..utils.argtools import graph_memoized\nfrom ..tfutils.tower import get_current_tower_context\nfrom .common import layer_register\n\n__all__ = ['regularize_cost', 'regularize_cost_from_collection',\n 'l2_regularizer', 'l1_regularizer', 'Dropout']\n\n\n@graph_memoized\ndef _log_once(msg):\n logger.info(msg)\n\n\nl2_regularizer = tf.contrib.layers.l2_regularizer\nl1_regularizer = tf.contrib.layers.l1_regularizer\n\n\ndef regularize_cost(regex, func, name='regularize_cost'):\n \"\"\"\n Apply a regularizer on trainable variables matching the regex, and print\n the matched variables (only print once in multi-tower training).\n In replicated mode, it will only regularize variables within the current tower.\n\n Args:\n regex (str): a regex to match variable names, e.g. \"conv.*/W\"\n func: the regularization function, which takes a tensor and returns a scalar tensor.\n E.g., ``tf.contrib.layers.l2_regularizer``.\n\n Returns:\n tf.Tensor: a scalar, the total regularization cost.\n\n Example:\n .. code-block:: python\n\n cost = cost + regularize_cost(\"fc.*/W\", l2_regularizer(1e-5))\n \"\"\"\n assert len(regex)\n ctx = get_current_tower_context()\n if not ctx.is_training:\n # Currently cannot build the wd_cost correctly at inference,\n # because ths vs_name used in inference can be '', therefore the\n # variable filter will fail\n return tf.constant(0, dtype=tf.float32, name='empty_' + name)\n\n # If vars are shared, regularize all of them\n # If vars are replicated, only regularize those in the current tower\n if ctx.has_own_variables:\n params = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)\n else:\n params = tf.trainable_variables()\n\n names = []\n\n with tf.name_scope(name + '_internals'):\n costs = []\n for p in params:\n para_name = p.op.name\n if re.search(regex, para_name):\n costs.append(func(p))\n names.append(p.name)\n if not costs:\n return tf.constant(0, dtype=tf.float32, name='empty_' + name)\n\n # remove tower prefix from names, and print\n if len(ctx.vs_name):\n prefix = ctx.vs_name + '/'\n prefixlen = len(prefix)\n\n def f(name):\n if name.startswith(prefix):\n return name[prefixlen:]\n return name\n names = list(map(f, names))\n logger.info(\"regularize_cost() found {} variables to regularize.\".format(len(names)))\n _log_once(\"The following tensors will be regularized: {}\".format(', '.join(names)))\n\n return tf.add_n(costs, name=name)\n\n\ndef regularize_cost_from_collection(name='regularize_cost'):\n \"\"\"\n Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.\n If in replicated mode, will only regularize variables created within the current tower.\n\n Args:\n name (str): the name of the returned tensor\n\n Returns:\n tf.Tensor: a scalar, the total regularization cost.\n \"\"\"\n ctx = get_current_tower_context()\n if not ctx.is_training:\n # TODO Currently cannot build the wd_cost correctly at inference,\n # because ths vs_name used in inference can be '', therefore the\n # variable filter will fail\n return tf.constant(0, dtype=tf.float32, name='empty_' + name)\n\n # NOTE: this collection doesn't always grow with towers.\n # It only grows with actual variable creation, but not get_variable call.\n if ctx.has_own_variables: # be careful of the first tower (name='')\n losses = ctx.get_collection_in_tower(tf.GraphKeys.REGULARIZATION_LOSSES)\n else:\n losses = 
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if len(losses) > 0:\n logger.info(\"regularize_cost_from_collection() found {} regularizers \"\n \"in REGULARIZATION_LOSSES collection.\".format(len(losses)))\n reg_loss = tf.add_n(losses, name=name)\n return reg_loss\n else:\n return tf.constant(0, dtype=tf.float32, name='empty_' + name)\n\n\n@layer_register(use_scope=None)\ndef Dropout(x, *args, **kwargs):\n \"\"\"\n Same as `tf.layers.dropout`.\n However, for historical reasons, the first positional argument is\n interpreted as keep_prob rather than drop_prob.\n Explicitly use `rate=` keyword arguments to ensure things are consistent.\n \"\"\"\n if 'is_training' in kwargs:\n kwargs['training'] = kwargs.pop('is_training')\n if len(args) > 0:\n if args[0] != 0.5:\n logger.warn(\n \"The first positional argument to tensorpack.Dropout is the probability to keep, rather than to drop. \"\n \"This is different from the rate argument in tf.layers.Dropout due to historical reasons. \"\n \"To mimic tf.layers.Dropout, explicitly use keyword argument 'rate' instead\")\n rate = 1 - args[0]\n elif 'keep_prob' in kwargs:\n assert 'rate' not in kwargs, \"Cannot set both keep_prob and rate!\"\n rate = 1 - kwargs.pop('keep_prob')\n elif 'rate' in kwargs:\n rate = kwargs.pop('rate')\n else:\n rate = 0.5\n\n if kwargs.get('training', None) is None:\n kwargs['training'] = get_current_tower_context().is_training\n\n return tf.layers.dropout(x, rate=rate, **kwargs)\n"
] | [
[
"tensorflow.constant",
"tensorflow.get_collection",
"tensorflow.layers.dropout",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.add_n"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
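Stripped of the tower-context handling, `regularize_cost` above reduces to a regex filter over trainable variables plus `tf.add_n`. A minimal TF 1.x sketch of that core (graph mode, using `tf.contrib.layers.l2_regularizer`, which exists in the 1.x versions listed):

```python
import re
import tensorflow as tf  # TF 1.x graph mode assumed

W = tf.get_variable('fc1/W', shape=[4, 4])
b = tf.get_variable('fc1/b', shape=[4])

func = tf.contrib.layers.l2_regularizer(1e-5)
costs = [func(p) for p in tf.trainable_variables()
         if re.search('fc.*/W', p.op.name)]  # matches fc1/W, skips fc1/b
wd_cost = tf.add_n(costs, name='regularize_cost')
```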
Rasend-dev/aprendizaje_reforzado | [
"eb1def49f8c439f461d54ca95b1f9eb918a05bab"
] | [
"model.py"
] | [
"from collections import deque\nimport torch \nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport os # this is for save our model\n\nclass Linear_QNet(nn.Module):\n def __init__(self,input_size,hidden_size,output_size):\n super().__init__()\n self.linear1 = nn.Linear(input_size,hidden_size)\n self.linear2 = nn.Linear(hidden_size, output_size)\n\n def forward(self,x):\n x = F.relu(self.linear1(x))\n x = self.linear2(x)\n return x\n\n def save(self,file_name='model.pth'):\n model_folder_path = './model'\n if not os.path.exists(model_folder_path):\n os.makedirs(model_folder_path)\n\n file_name = os.path.join(model_folder_path,file_name)\n torch.save(self.state_dict(), file_name)\n\nclass QTrainer:\n def __init__(self,model,lr,gamma):\n self.lr = lr\n self.gamma = gamma\n self.model = model\n self.optimizer = optim.Adam(model.parameters(),lr=self.lr)\n self.criterion = nn.MSELoss()\n\n def train_step(self,state,action,reward,next_state,done):\n state = torch.tensor(state,dtype = torch.float)\n next_state = torch.tensor(next_state,dtype = torch.float)\n action = torch.tensor(action,dtype = torch.float)\n reward = torch.tensor(reward,dtype = torch.float)\n #(n,x)\n \n if len(state.shape) == 1:\n # (1,x)\n state = torch.unsqueeze(state,0)\n next_state = torch.unsqueeze(next_state,0)\n action = torch.unsqueeze(action,0)\n reward = torch.unsqueeze(reward,0)\n done = (done,)\n\n #1: predicted Q values with current state\n pred = self.model(state) \n\n target = pred.clone()\n for idx in range(len(done)):\n Q_new = reward[idx]\n if not done[idx]:\n Q_new = reward[idx] + self.gamma * torch.max(self.model(next_state[idx]))\n\n target[idx][torch.argmax(action).item()] = Q_new\n\n #2 Q_new = r y * max(next_predicted Q value) -> only do this if not done\n #pred.clone()\n #preds[argmax(action)] = Q_new \n self.optimizer.zero_grad()\n loss = self.criterion(target, pred)\n loss.backward()\n\n self.optimizer.step()"
] | [
[
"torch.unsqueeze",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
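A hypothetical usage sketch for `Linear_QNet` and `QTrainer` above (it assumes the file is saved as `model.py`; the 11-dim state and 3 one-hot actions are arbitrary example sizes): one DQN-style training step on a single transition.

```python
import torch
from model import Linear_QNet, QTrainer  # assumes the file above is model.py

model = Linear_QNet(input_size=11, hidden_size=256, output_size=3)
trainer = QTrainer(model, lr=0.001, gamma=0.9)

state = [0.0] * 11            # example 11-dim state vector
next_state = [1.0] * 11
action = [0, 1, 0]            # one-hot: second of three actions
trainer.train_step(state, action, reward=10, next_state=next_state, done=False)

model.save('demo.pth')        # writes ./model/demo.pth
```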
TeemooHuang/2048-api | [
"ddea840d3236fa0601ea7b38de5ddc5edddc6fa9"
] | [
"RNNs_vote/RNNs_vote10_add.py"
] | [
"from game2048.game import Game\nfrom game2048.displays import Display, IPythonDisplay\nfrom game2048.agents import Agent, RandomAgent, ExpectiMaxAgent\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport time\n\n#from sklearn import preprocessing\n\nBATCH_SIZE = 128\n\ndisplay1 = Display()\ndisplay2 = IPythonDisplay()\n\n'''\ntrain_x_512 = np.load('data_x_final_enc_512.npy').reshape(-1, 16)\ntrain_y_512 = np.load('data_y_final_512.npy')\n'''\ntrain_x1 = np.load(\"/cluster/home/it_stu91/2048/data_x_final_enc_0_256.npy\")\ntrain_y1 = np.load(\"/cluster/home/it_stu91/2048/data_y_final_0_256.npy\")\n\ntrain_x2 = np.load(\"/cluster/home/it_stu91/2048/data_x_final_enc_256_512.npy\")\ntrain_y2 = np.load(\"/cluster/home/it_stu91/2048/data_y_final_256_512.npy\")\n\ntrain_x3 = np.load(\"/cluster/home/it_stu91/2048/data_x_final_enc_1024.npy\")\ntrain_y3 = np.load(\"/cluster/home/it_stu91/2048/data_y_final_1024.npy\")\n\ntrain_x4 = np.load(\"/cluster/home/it_stu91/2048/data_x_final_enc_2048.npy\")\ntrain_y4 = np.load(\"/cluster/home/it_stu91/2048/data_y_final_2048.npy\")\n\ntrain_x5 = np.load(\"/cluster/home/it_stu91/2048/data_x_add_enc.npy\")\ntrain_y5 = np.load(\"/cluster/home/it_stu91/2048/data_y_add.npy\")\n\ntrain_x6 = np.load(\"/cluster/home/it_stu91/2048/data_x_add2_enc.npy\")\ntrain_y6 = np.load(\"/cluster/home/it_stu91/2048/data_y_add2.npy\")\n'''\ntrain_x_2048 = np.load('data_x_final_enc_2048.npy').reshape(-1, 16)\ntrain_y_2048 = np.load('data_y_final_2048.npy')\n'''\n'''\ntrain_x_512 = torch.Tensor(train_x_512)\ntrain_y_512 = torch.LongTensor(train_y_512)\n'''\ntrain_x = np.zeros((0, 16, 11))\ntrain_y = np.zeros((0, 1))\n\ntrain_x = np.vstack((train_x, train_x1))\ntrain_x = np.vstack((train_x, train_x2))\ntrain_x = np.vstack((train_x, train_x3))\ntrain_x = np.vstack((train_x, train_x4))\ntrain_x = np.vstack((train_x, train_x5))\ntrain_x = np.vstack((train_x, train_x6))\n\n\ntrain_y = np.vstack((train_y, train_y1))\ntrain_y = np.vstack((train_y, train_y2))\ntrain_y = np.vstack((train_y, train_y3))\ntrain_y = np.vstack((train_y, train_y4))\ntrain_y = np.vstack((train_y, train_y5))\ntrain_y = np.vstack((train_y, train_y6))\n\ntrain_x = torch.Tensor(train_x)\ntrain_y = torch.LongTensor(train_y)\n'''\ntrain_x_2048 = torch.Tensor(train_x_2048)\ntrain_y_2048 = torch.LongTensor(train_y_2048)\n'''\n\nclass MyDataset(data.Dataset):\n def __init__(self, images, labels):\n self.images = images\n self.labels = labels\n\n def __getitem__(self, index):#返回的是tensor\n img, target = self.images[index], self.labels[index]\n return img, target\n\n def __len__(self):\n return len(self.images)\n'''\nmydataset_512 = MyDataset(train_x_512, train_y_512)\ntrain_loader_512 = data.DataLoader(mydataset_512, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\n'''\nmydataset = MyDataset(train_x, train_y)\ntrain_loader = data.DataLoader(mydataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\n'''\nmydataset_2048 = MyDataset(train_x_2048, train_y_2048)\ntrain_loader_2048 = data.DataLoader(mydataset_2048, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\n'''\n\n\n\n\nsequence_length = 16 # 序列长度,将图像的每一列作为一个序列\ninput_size = 11 # 输入数据的维度\nhidden_size = 256 # 隐藏层的size\nnum_layers = 4 # 有多少层\n\nnum_classes = 4\nbatch_size = 128\nnum_epochs = 20\nlearning_rate = 0.001\n\n\n'''\nclass RNN_512(nn.Module):\n def 
__init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN_512, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) # batch_first=True仅仅针对输入而言\n self.fc = nn.Linear(hidden_size, num_classes)\n \n def forward(self, x):\n # 设置初始状态h_0与c_0的状态是初始的状态,一般设置为0,尺寸是,x.size(0)\n h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda()) \n c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda())\n \n # Forward propagate RNN\n out, (h_n, c_n) = self.lstm(x, (h0, c0)) # 送入一个初始的x值,作为输入以及(h0, c0)\n \n # Decode hidden state of last time step\n out = self.fc(out[:, -1, :]) # output也是batch_first, 实际上h_n与c_n并不是batch_first\n return out\n\nmodel_512 = RNN_512(input_size, hidden_size, num_layers, num_classes)\nmodel_512 = model_512.cuda()\n'''\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) # batch_first=True仅仅针对输入而言\n self.fc = nn.Linear(hidden_size, num_classes)\n \n def forward(self, x):\n # 设置初始状态h_0与c_0的状态是初始的状态,一般设置为0,尺寸是,x.size(0)\n h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda()) \n c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda())\n \n # Forward propagate RNN\n out, (h_n, c_n) = self.lstm(x, (h0, c0)) # 送入一个初始的x值,作为输入以及(h0, c0)\n \n # Decode hidden state of last time step\n out = self.fc(out[:, -1, :]) # output也是batch_first, 实际上h_n与c_n并不是batch_first\n return out\n\nmodel = RNN(input_size, hidden_size, num_layers, num_classes)\nmodel = model.cuda()\n\n'''\nclass RNN_2048(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN_2048, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) # batch_first=True仅仅针对输入而言\n self.fc = nn.Linear(hidden_size, num_classes)\n \n def forward(self, x):\n # 设置初始状态h_0与c_0的状态是初始的状态,一般设置为0,尺寸是,x.size(0)\n h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda()) \n c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size).cuda())\n \n # Forward propagate RNN\n out, (h_n, c_n) = self.lstm(x, (h0, c0)) # 送入一个初始的x值,作为输入以及(h0, c0)\n \n # Decode hidden state of last time step\n out = self.fc(out[:, -1, :]) # output也是batch_first, 实际上h_n与c_n并不是batch_first\n return out\n\nmodel_2048 = RNN_1024(input_size, hidden_size, num_layers, num_classes)\nmodel_2048 = model_2048.cuda()\n\n'''\ncriterion = nn.CrossEntropyLoss()\n#criterion = nn.L1Loss()\n#optimizer_512 = torch.optim.Adam(model_512.parameters(), lr = 0.001)\noptimizer = torch.optim.Adam(model.parameters(), lr = 0.001)\n#optimizer_2048 = torch.optim.Adam(model_2048.parameters(), lr = 0.001)\n\n\nmodel = model.cuda()\nNUM_EPOCHS = 8\nfor epoch in range(NUM_EPOCHS):\n i = 0\n running_loss = 0\n print('EPOCHS', epoch + 1)\n correct = 0\n for images, labels in tqdm(train_loader):\n i += 1\n images, labels = Variable(images), Variable(labels)\n #print(images.shape)\n labels = labels.long()\n optimizer.zero_grad()\n #print(images.shape)\n #print (images.shape)\n images = images.reshape(-1, 16, 11).cuda()\n #print (images.shape)\n output = model(images).reshape(-1, 4).cuda()\n labels = labels.float().reshape(-1).cuda()\n 
correct += (labels.cpu().numpy() == output.cpu().detach().numpy().argmax(axis = 1)).sum()\n #print(output.shape)\n #print(labels.shape)\n loss = criterion(output, labels.long())\n running_loss += float(loss)\n loss.backward()\n optimizer.step()\n print(running_loss/i)\n print(\"accuracy: \", correct/float(train_x.shape[0]))\n #print(\"accuracy: \", correct/float(train_x.shape[0]))\n#torch.save(model_1024, '/cluster/home/it_stu84/2048/model_1024.pkl')\ntorch.save(model.state_dict(), '/cluster/home/it_stu91/2048/model_RNN_vote10_add_params.pkl')\n"
] | [
[
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.Tensor",
"torch.nn.LSTM",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"numpy.load",
"numpy.zeros",
"numpy.vstack",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
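The encoding that produces the `data_x_*_enc` arrays is not shown in this file, but the `(-1, 16, 11)` reshape implies each 4x4 board becomes a 16-step sequence of 11-dim one-hot vectors (tile exponents 0..10). A sketch of that assumed encoding:

```python
import numpy as np

board = np.array([[0, 2, 4, 8],
                  [0, 0, 2, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 2]])

# Tile value 2**k -> exponent k; empty cells map to exponent 0.
exps = np.where(board > 0, np.log2(np.maximum(board, 1)), 0).astype(int)
onehot = np.eye(11)[exps.ravel()]  # shape (16, 11): one board = one LSTM input
print(onehot.shape)
```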
TheSignPainter/CausalDiscoveryToolbox | [
"073b13a5076390147e95763bab73775c59e6d891"
] | [
"cdt/causality/pairwise/RCC.py"
] | [
"\"\"\"Randomized Causation Coefficient Model.\n\nAuthor : David Lopez-Paz\nRef : Lopez-Paz, David and Muandet, Krikamol and Schölkopf, Bernhard and Tolstikhin, Ilya O,\n \"Towards a Learning Theory of Cause-Effect Inference\", ICML 2015.\n\n.. MIT License\n..\n.. Copyright (c) 2018 Diviyan Kalainathan\n..\n.. Permission is hereby granted, free of charge, to any person obtaining a copy\n.. of this software and associated documentation files (the \"Software\"), to deal\n.. in the Software without restriction, including without limitation the rights\n.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n.. copies of the Software, and to permit persons to whom the Software is\n.. furnished to do so, subject to the following conditions:\n..\n.. The above copyright notice and this permission notice shall be included in all\n.. copies or substantial portions of the Software.\n..\n.. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n.. SOFTWARE.\n\"\"\"\n\nfrom sklearn.preprocessing import scale\nfrom sklearn.ensemble import RandomForestClassifier as CLF\nfrom ...utils.Settings import SETTINGS\nimport pandas\nimport numpy as np\nfrom .model import PairwiseModel\n\n\nclass RCC(PairwiseModel):\n \"\"\"Randomized Causation Coefficient model. 2nd approach in the Fast\n Causation challenge.\n\n **Description:** The Randomized causation coefficient (RCC) relies on the\n projection of the empirical distributions into a RKHS using random cosine\n embeddings, then classfies the pairs using a random forest based on those\n features.\n\n **Data Type:** Continuous, Categorical, Mixed\n\n **Assumptions:** This method needs a substantial amount of labelled causal\n pairs to train itself. Its final performance depends on the training set\n used.\n\n Args:\n rand_coeff (int): number of randomized coefficients\n nb_estimators (int): number of estimators\n nb_min_leaves (int): number of min samples leaves of the estimator\n max_depth (): (optional) max depth of the model\n s (float): scaling\n njobs (int): number of jobs to be run on parallel (defaults to ``cdt.SETTINGS.NJOBS``)\n verbose (bool): verbosity (defaults to ``cdt.SETTINGS.verbose``)\n\n .. note::\n Ref : Lopez-Paz, David and Muandet, Krikamol and Schölkopf, Bernhard and Tolstikhin, Ilya O,\n \"Towards a Learning Theory of Cause-Effect Inference\", ICML 2015.\n\n Example:\n >>> from cdt.causality.pairwise import RCC\n >>> import networkx as nx\n >>> import matplotlib.pyplot as plt\n >>> from cdt.data import load_dataset\n >>> from sklearn.model_selection import train_test_split\n >>> data, labels = load_dataset('tuebingen')\n >>> X_tr, X_te, y_tr, y_te = train_test_split(data, labels, train_size=.5)\n >>>\n >>> obj = RCC()\n >>> obj.fit(X_tr, y_tr)\n >>> # This example uses the predict() method\n >>> output = obj.predict(X_te)\n >>>\n >>> # This example uses the orient_graph() method. 
The dataset used\n >>> # can be loaded using the cdt.data module\n >>> data, graph = load_dataset('sachs')\n >>> output = obj.orient_graph(data, nx.DiGraph(graph))\n >>>\n >>> # To view the directed graph run the following command\n >>> nx.draw_networkx(output, font_size=8)\n >>> plt.show()\n \"\"\"\n\n def __init__(self, rand_coeff=333, nb_estimators=500, nb_min_leaves=20,\n max_depth=None, s=10, njobs=None, verbose=None):\n \"\"\"Initialize the model w/ its parameters.\n \"\"\"\n np.random.seed(0)\n self.K = rand_coeff\n self.E = nb_estimators\n self.L = nb_min_leaves\n self.njobs, self.verbose = SETTINGS.get_default(('njobs', njobs), ('verbose', verbose))\n self.max_depth = max_depth\n\n self.W = np.hstack((s * np.random.randn(self.K, 2),\n 2 * np.pi * np.random.rand(self.K, 1)))\n self.W2 = np.hstack((s * np.random.randn(self.K, 1),\n 2 * np.pi * np.random.rand(self.K, 1)))\n self.clf = None\n\n def featurize_row(self, x, y):\n \"\"\" Projects the causal pair to the RKHS using the sampled kernel approximation.\n\n Args:\n x (np.ndarray): Variable 1\n y (np.ndarray): Variable 2\n\n Returns:\n np.ndarray: projected empirical distributions into a single fixed-size vector.\n \"\"\"\n x = x.ravel()\n y = y.ravel()\n b = np.ones(x.shape)\n dx = np.cos(np.dot(self.W2, np.vstack((x, b)))).mean(1)\n dy = np.cos(np.dot(self.W2, np.vstack((y, b)))).mean(1)\n if(sum(dx) > sum(dy)):\n return np.hstack((dx, dy,\n np.cos(np.dot(self.W, np.vstack((x, y, b)))).mean(1)))\n else:\n return np.hstack((dx, dy,\n np.cos(np.dot(self.W, np.vstack((y, x, b)))).mean(1)))\n\n def fit(self, x, y):\n \"\"\"Train the model.\n\n Args:\n x_tr (pd.DataFrame): CEPC format dataframe containing the pairs\n y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs\n \"\"\"\n train = np.vstack((np.array([self.featurize_row(row.iloc[0],\n row.iloc[1]) for idx, row in x.iterrows()]),\n np.array([self.featurize_row(row.iloc[1],\n row.iloc[0]) for idx, row in x.iterrows()])))\n labels = np.vstack((y, -y)).ravel()\n verbose = 1 if self.verbose else 0\n self.clf = CLF(verbose=verbose,\n min_samples_leaf=self.L,\n n_estimators=self.E,\n max_depth=self.max_depth,\n n_jobs=self.njobs).fit(train, labels)\n\n def predict_proba(self, dataset, **kwargs):\n \"\"\" Predict the causal score using a trained RCC model\n\n Args:\n dataset (tuple): Couple of np.ndarray variables to classify\n\n Returns:\n float: Causation score (Value : 1 if a->b and -1 if b->a)\n \"\"\"\n if self.clf is None:\n raise ValueError(\"Model has to be trained before making predictions.\")\n\n x, y = dataset\n input_ = self.featurize_row(x, y).reshape((1, -1))\n return self.clf.predict(input_)\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.random.seed",
"numpy.ones",
"numpy.random.randn",
"numpy.random.rand",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
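The RCC record above summarizes an empirical distribution with averaged random cosine projections. A minimal standalone sketch of that featurization idea, with illustrative values for the kernel scale and feature count (the names random_cosine_embedding, n_features and scale are hypothetical, not part of cdt):

    import numpy as np

    def random_cosine_embedding(sample, n_features=100, scale=10.0, seed=0):
        # Average cos(w * x + b) over the sample for random frequencies w and
        # random phases b; the result is a fixed-size summary of the distribution.
        rng = np.random.default_rng(seed)
        x = np.asarray(sample, dtype=float).ravel()
        w = scale * rng.standard_normal(n_features)
        b = 2 * np.pi * rng.random(n_features)
        return np.cos(np.outer(w, x) + b[:, None]).mean(axis=1)

    emb = random_cosine_embedding(np.random.randn(500))
    print(emb.shape)  # (100,)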
ptsagkis/UGLab | [
"2ef1cdd0927152c763f6b36154b0a7b6c4ca2d54"
] | [
"Model/FeaturesImpact.py"
] | [
"from matplotlib.pyplot import xticks\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LogisticRegression\nfrom matplotlib import pyplot\nfrom Config.Constants import Constants\nfrom Services.FileUtils import FileUtils\n\n\nclass FeaturesImpact:\n \"\"\"\n couple of methods to support the representation of feature's impact\n \"\"\"\n\n def __init__(self, project_path):\n self.project_path = project_path\n self.plot_labels = [\n 'road_net',\n 'city_dist',\n 'coast_dist',\n 'height',\n 'slope',\n 'hillshade',\n 'aspect',\n 'pop',\n '1', '2', '3', '4', '5', '6', '7', '8',\n 't-1 class A',\n 't-1 class B',\n ]\n\n def printImportanceLR(self, X_train, y_train, show=False):\n \"\"\"\n Set the solver. @default: 'lbfgs'\n ‘newton-cg’ - [‘l2’, ‘none’]\n ‘lbfgs’ - [‘l2’, ‘none’]\n ‘liblinear’ - [‘l1’, ‘l2’]\n ‘sag’ - [‘l2’, ‘none’]\n ‘saga’ - [‘elasticnet’, ‘l1’, ‘l2’, ‘none’]\n \"\"\"\n model = LogisticRegression(solver='lbfgs', max_iter=1000)\n # fit the model\n model.fit(X_train, y_train)\n # get importance\n importance = model.coef_[0]\n # remove last two elements\n importance_m = importance[:len(importance)]\n # print summarize feature importance\n for i, v in enumerate(importance):\n print('Feature: %0d, Score: %.5f' % (i, v))\n # plot feature importance\n pyplot.bar([x for x in range(len(importance_m))], [float(i)/sum(list(map(abs, importance_m))) for i in list(map(abs, importance_m))])\n locs, labels = xticks()\n pyplot.subplots_adjust(bottom=0.2)\n xticks([*range(0, 18, 1)],\n self.plot_labels,\n rotation=90) # Set text labels and properties.\n FileUtils.delete_file(self.project_path + Constants.ML_RESULTS_DIR + 'FEATURE_IMPACT_LR.png')\n pyplot.savefig(self.project_path + Constants.ML_RESULTS_DIR + 'FEATURE_IMPACT_LR.png')\n if show:\n pyplot.show()\n\n def printImportanceRF(self, X_train, y_train, show=False):\n model = RandomForestRegressor()\n # fit the model\n model.fit(X_train, y_train)\n # get importance\n importance = model.feature_importances_\n # remove last two elements\n importance_m = importance[:len(importance)]\n # summarize feature importance\n for i, v in enumerate(importance):\n print('Feature: %0d, Score: %.5f' % (i, v))\n # plot feature importance\n pyplot.bar([x for x in range(len(importance_m))], list(map(abs, importance_m)))\n locs, labels = xticks()\n pyplot.subplots_adjust(bottom=0.2)\n\n xticks([*range(0, 18, 1)],\n self.plot_labels,\n rotation=45) # Set text labels and properties.\n FileUtils.delete_file(self.project_path + Constants.ML_RESULTS_DIR + 'FEATURE_IMPACT_RF.png')\n pyplot.savefig(self.project_path + Constants.ML_RESULTS_DIR + 'FEATURE_IMPACT_RF.png')\n if show:\n pyplot.show()\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
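printImportanceLR above plots each coefficient's share of the total absolute weight. A minimal sketch of that normalization on toy numbers (normalized_abs_importance is a hypothetical helper name, not part of the repo):

    import numpy as np

    def normalized_abs_importance(coefs):
        # Each bar is |coef| divided by the sum of all |coef| values,
        # so the bars sum to 1 regardless of coefficient scale.
        a = np.abs(np.asarray(coefs, dtype=float))
        return a / a.sum()

    print(normalized_abs_importance([0.5, -1.5, 2.0]))  # [0.125 0.375 0.5  ]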
stevezhangz/BERT-pytorch | [
"86e1d9f20c48306eeb8b6790977a575fc97bed77"
] | [
"train_demo.py"
] | [
"# encode utf-8\n# code by steve zhang z\n# Time: 4/22/2021\n# electric address: [email protected]\nfrom bert import *\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom Config_load import *\nfrom data_process import *\nimport random\n\n\nnp.random.seed(random_seed)\n\n\n# transform json to list\n#json2list=general_transform_text2list(\"data/demo.txt\",type=\"txt\")\njson2list=general_transform_text2list(\"data/chinese-poetry/chuci/chuci.json\",type=\"json\",args=['content'])\ndata=json2list.getdata()\n# transform list to token\nlist2token=generate_vocab_normalway(data,map_dir=\"words_info.json\")\nsentences,token_list,idx2word,word2idx,vocab_size=list2token.transform()\nbatch = creat_batch(batch_size,max_pred,maxlen,word2idx,idx2word,token_list,0.15)\nloader = Data.DataLoader(Text_file(batch), batch_size, True)\n\nmodel=Bert(n_layers=n_layers,\n vocab_size=vocab_size,\n emb_size=d_model,\n max_len=maxlen,\n seg_size=n_segments,\n dff=d_ff,\n dk=d_k,\n dv=d_v,\n n_head=n_heads,\n n_class=2,\n drop=drop)\n\nif use_gpu:\n with torch.cuda.device(device) as device:\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adadelta(model.parameters(), lr=lr)\n model.Train_for_mask_guess(epoches=epoches,\n train_data_loader=loader,\n optimizer=optimizer,\n criterion=criterion,\n save_dir=weight_dir,\n save_freq=100,\n load_dir=\"checkpoint/checkpoint_199.pth\",\n use_gpu=use_gpu,\n device=device\n )\nelse:\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adadelta(model.parameters(), lr=lr)\n model.Train_for_mask_guess(epoches=epoches,\n train_data_loader=loader,\n optimizer=optimizer,\n criterion=criterion,\n save_dir=weight_dir,\n save_freq=50,\n load_dir=\"checkpoint/checkpoint_199.pth\",\n use_gpu=use_gpu,\n device=device\n )\n"
] | [
[
"torch.cuda.device",
"torch.nn.CrossEntropyLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
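The record above branches on use_gpu before building the loss and optimizer. A generic PyTorch sketch of the same setup pattern with a placeholder model (the Bert class and Train_for_mask_guess loop belong to that repo and are not reproduced here):

    import torch
    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(10, 2)  # stand-in for the BERT model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adadelta(model.parameters(), lr=1.0)

    x = torch.randn(4, 10, device=device)          # dummy batch
    y = torch.randint(0, 2, (4,), device=device)   # dummy labels
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()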
maxguy2001/datafest2021 | [
"2258651b763f258ea6c2ab9c19950cdc8ee8d75a"
] | [
"wordcloud/cal.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv(\"data/uk.csv\")\n\ncols_drug_use = [\n 'FENT_USE',\n 'BUP_USE',\n 'METH_USE',\n 'MORPH_USE',\n 'OXY_USE',\n 'TRAM_USE',\n 'TAP_USE',\n 'COD_USE',\n 'DIHY_USE',\n 'HYDM_USE',\n 'SUF_USE',\n 'STIM_USE',\n 'BENZ_USE',\n 'THC_USE',\n 'DEX_USE',\n 'DIPH_USE',\n 'LOP_USE'\n]\n\ndrug_names = [\n 'Fentanyl',\n 'Buprenorphine',\n 'Methadone',\n 'Morphine',\n 'Oxycodone',\n 'Tramadol',\n 'Tapentadol',\n 'Codeine',\n 'Dihydrocodeine',\n 'Hydromorphone',\n 'Sufentanil',\n 'Stimulands',\n 'Benzodiazepines',\n 'Cannabinoids',\n 'Dextromethorphan',\n 'Diphenhydramine',\n 'Loperamide'\n]\n\ndf_drug_use = df[cols_drug_use]\ndf_drug_use.columns = drug_names\n\n\ndef plot(df_drug_use):\n fig, ax = plt.subplots(1, 1, figsize=(15, 15))\n corr = df_drug_use.corr()\n mask = np.zeros_like(corr)\n mask[np.triu_indices_from(mask)] = True\n ax = sns.heatmap(corr, annot=True, mask=mask, linewidths=.5, cmap=\"YlGnBu\", square=True)\n ax.set_title('Relationship between prescription \\n& over-the-counter drug usage', fontsize=35)\n plt.savefig('drug_use_corr_heatmap.png')\n"
] | [
[
"pandas.read_csv",
"numpy.triu_indices_from",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
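plot() above hides the redundant upper triangle of the correlation matrix before drawing the heatmap. A self-contained sketch of that masking trick on synthetic data (file name and column labels are placeholders):

    import numpy as np
    import pandas as pd
    import seaborn as sns
    import matplotlib.pyplot as plt

    df = pd.DataFrame(np.random.rand(100, 4), columns=list("ABCD"))
    corr = df.corr()
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True   # True entries are hidden
    sns.heatmap(corr, annot=True, mask=mask, cmap="YlGnBu", square=True)
    plt.savefig("corr_demo.png")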
phankiewicz/aem | [
"61a9b502bb713413b916d82e7247fb6e9c41bc06"
] | [
"src/greedy.py"
] | [
"import math\nfrom collections import defaultdict\n\nimport numpy as np\n\n\ndef get_regret(insertion_costs):\n if len(insertion_costs) >= 2:\n return insertion_costs[1][0] - insertion_costs[0][0]\n return insertion_costs[0][0] * -1\n\n\ndef nn_greedy_tsp(distance_matrix, starting_vertex, *args, **kwargs):\n _, matrix_width = distance_matrix.shape\n number_of_vertices_required = math.ceil(0.5 * matrix_width)\n vertices_visited = np.zeros(matrix_width)\n cycle_vertices = []\n cycle_length = 0\n\n current_vertex = starting_vertex\n\n for iteration in range(number_of_vertices_required):\n cycle_vertices.append(current_vertex)\n vertices_visited[current_vertex] = 1\n\n distances = [\n (index, distance)\n for index, distance in enumerate(distance_matrix[current_vertex, :])\n if distance != 0 and not vertices_visited[index]\n ]\n current_vertex, min_distance = min(distances, key=lambda x: x[1])\n cycle_length += min_distance\n\n last_vertex = current_vertex\n cycle_vertices.append(starting_vertex)\n cycle_length += distance_matrix[starting_vertex, last_vertex]\n\n return cycle_vertices, cycle_length\n\n\ndef regret_1_greedy_cycle_tsp(distance_matrix, starting_vertex, *args, **kwargs):\n _, matrix_width = distance_matrix.shape\n number_of_vertices_required = math.ceil(0.5 * matrix_width)\n vertices_visited = np.zeros(matrix_width)\n cycle_vertices = []\n cycle_length = 0\n\n cycle_vertices.extend([starting_vertex, starting_vertex])\n vertices_visited[starting_vertex] = 1\n\n for iteration in range(number_of_vertices_required - 1):\n insertion_costs = []\n insertion_costs_per_vertex = defaultdict(list)\n for first_vertex, second_vertex in zip(cycle_vertices, cycle_vertices[1:]):\n for target_vertex in [\n vertex for vertex in range(matrix_width) if not vertices_visited[vertex]\n ]:\n insertion_cost = (\n distance_matrix[first_vertex, target_vertex]\n + distance_matrix[second_vertex, target_vertex]\n - distance_matrix[first_vertex, second_vertex]\n )\n insertion_costs_per_vertex[target_vertex].append(\n (insertion_cost, target_vertex, (first_vertex, second_vertex))\n )\n insertion_costs.append(\n (insertion_cost, target_vertex, (first_vertex, second_vertex))\n )\n\n for key, value in insertion_costs_per_vertex.items():\n insertion_costs_per_vertex[key] = sorted(value, key=lambda x: x[0])\n\n regrets = [\n (vertex, get_regret(insertion_costs))\n for vertex, insertion_costs in insertion_costs_per_vertex.items()\n ]\n\n biggest_regret_vertex, _ = max(regrets, key=lambda x: x[1])\n\n chosen_insertion = insertion_costs_per_vertex[biggest_regret_vertex][0]\n chosen_insertion_cost, chosen_vertex, (\n first_position_vertex,\n second_position_vertex,\n ) = chosen_insertion\n assert chosen_vertex == biggest_regret_vertex\n cycle_length += chosen_insertion_cost\n position_index = cycle_vertices.index(first_position_vertex) + 1\n assert cycle_vertices[position_index] == second_position_vertex\n cycle_vertices.insert(position_index, chosen_vertex)\n vertices_visited[chosen_vertex] = 1\n\n return cycle_vertices, cycle_length\n\n\ndef greedy_cycle_tsp(distance_matrix, starting_vertex, *args, **kwargs):\n _, matrix_width = distance_matrix.shape\n number_of_vertices_required = math.ceil(0.5 * matrix_width)\n vertices_visited = np.zeros(matrix_width)\n cycle_vertices = []\n cycle_length = 0\n\n cycle_vertices.extend([starting_vertex, starting_vertex])\n vertices_visited[starting_vertex] = 1\n\n for iteration in range(number_of_vertices_required - 1):\n insertion_costs = []\n for first_vertex, second_vertex in 
zip(cycle_vertices, cycle_vertices[1:]):\n for target_vertex in [\n vertex for vertex in range(matrix_width) if not vertices_visited[vertex]\n ]:\n insertion_cost = (\n distance_matrix[first_vertex, target_vertex]\n + distance_matrix[second_vertex, target_vertex]\n - distance_matrix[first_vertex, second_vertex]\n )\n insertion_costs.append(\n (insertion_cost, target_vertex, (first_vertex, second_vertex))\n )\n\n min_insertion_cost, new_vertex, (\n first_position_vertex,\n second_position_vertex,\n ) = min(insertion_costs, key=lambda x: x[0])\n cycle_length += min_insertion_cost\n position_index = cycle_vertices.index(first_position_vertex) + 1\n assert cycle_vertices[position_index] == second_position_vertex\n cycle_vertices.insert(position_index, new_vertex)\n vertices_visited[new_vertex] = 1\n\n return cycle_vertices, cycle_length\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
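get_regret() above scores each unvisited vertex by the gap between its best and second-best insertion cost. A tiny worked example with made-up costs:

    # insertion_costs is sorted ascending by cost: (cost, vertex, edge)
    insertion_costs = [(5.0, 7, (1, 2)), (9.0, 7, (2, 3))]
    regret = insertion_costs[1][0] - insertion_costs[0][0]
    print(regret)  # 4.0 -> vertices with large regret are inserted first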
chrisprobert/genomelake | [
"11eb1d9dce41a07d0cdf13049789983b972ef937"
] | [
"tests/test_extractors.py"
] | [
"from genomelake import backend\nfrom genomelake.extractors import ArrayExtractor, BigwigExtractor, FastaExtractor\nimport numpy as np\nfrom pybedtools import Interval\nimport pyBigWig\nimport pytest\n\narray_extractor_fasta_params = [\n (\"numpy\", True),\n (\"numpy\", False),\n (\"bcolz\", True),\n (\"bcolz\", False),\n (\"tiledb\", False),\n (\"tiledb\", True),\n]\n\n\ndef test_fasta_extractor_valid_intervals():\n extractor = FastaExtractor(\"tests/data/fasta_test.fa\")\n intervals = [Interval(\"chr1\", 0, 10), Interval(\"chr2\", 0, 10)]\n expected_data = np.array(\n [\n [\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n ],\n [\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [0.25, 0.25, 0.25, 0.25],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [0.25, 0.25, 0.25, 0.25],\n ],\n ],\n dtype=np.float32,\n )\n data = extractor(intervals)\n assert (data == expected_data).all()\n\n\ndef test_fasta_extractor_over_chr_end():\n extractor = FastaExtractor(\"tests/data/fasta_test.fa\")\n intervals = [Interval(\"chr1\", 0, 100), Interval(\"chr1\", 1, 101)]\n with pytest.raises(ValueError):\n data = extractor(intervals)\n\n\[email protected](\"mode,in_memory\", array_extractor_fasta_params)\ndef test_array_extractor_fasta(mode, in_memory):\n data_dir = \"tests/data/fasta_test_dir_{}_{}\".format(mode, in_memory)\n backend.extract_fasta_to_file(\n \"tests/data/fasta_test.fa\", data_dir, mode=mode, overwrite=True\n )\n extractor = ArrayExtractor(data_dir, in_memory=in_memory)\n intervals = [Interval(\"chr1\", 0, 10), Interval(\"chr2\", 0, 10)]\n expected_data = np.array(\n [\n [\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n ],\n [\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [0.25, 0.25, 0.25, 0.25],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.],\n [0.25, 0.25, 0.25, 0.25],\n ],\n ],\n dtype=np.float32,\n )\n data = extractor(intervals)\n assert (data == expected_data).all()\n\n\[email protected]\ndef test_bigwig_and_intervals():\n bw_path = \"tests/data/test_bigwig.bw\"\n intervals = [Interval(\"chr1\", 0, 10), Interval(\"chr2\", 0, 10)]\n expected_chr1 = np.array([0.1] * 10, dtype=np.float32)\n expected_chr2 = np.array([0] + [9] * 9, dtype=np.float32)\n expected_data = np.stack([expected_chr1, expected_chr2])\n\n return (bw_path, intervals, expected_data)\n\n\[email protected](\"mode,in_memory\", array_extractor_fasta_params)\ndef test_array_extractor_bigwig(test_bigwig_and_intervals, mode, in_memory):\n bw_path, intervals, expected_data = test_bigwig_and_intervals\n bw_dir_path = \"{}.dir\".format(bw_path)\n backend.extract_bigwig_to_file(bw_path, bw_dir_path, mode=mode, overwrite=True)\n extractor = ArrayExtractor(bw_dir_path, in_memory=in_memory)\n\n data = extractor(intervals)\n assert (data == expected_data).all()\n\n\ndef test_bigwig_extractor(test_bigwig_and_intervals):\n bw_path, intervals, expected_data = test_bigwig_and_intervals\n extractor = BigwigExtractor(bw_path)\n data = extractor(intervals)\n assert (data == expected_data).all()\n"
] | [
[
"numpy.array",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
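The expected_data arrays above encode DNA one-hot with unknown bases spread uniformly over the four channels. A minimal sketch of that encoding (the one_hot helper is hypothetical; genomelake's FastaExtractor performs this internally):

    import numpy as np

    LUT = {"A": [1, 0, 0, 0], "C": [0, 1, 0, 0], "G": [0, 0, 1, 0],
           "T": [0, 0, 0, 1], "N": [0.25, 0.25, 0.25, 0.25]}

    def one_hot(seq):
        # One row of four channel weights per base.
        return np.array([LUT[base] for base in seq], dtype=np.float32)

    print(one_hot("ACGTN").shape)  # (5, 4)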
owkin/MONAI | [
"a1c50211c916ee8d7c18d72f4e8a0721b403418a"
] | [
"monai/data/utils.py"
] | [
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport os\nimport warnings\nfrom itertools import product, starmap\nfrom pathlib import PurePath\nfrom typing import Dict, Generator, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch.utils.data._utils.collate import default_collate\n\nfrom monai.networks.layers.simplelayers import GaussianFilter\nfrom monai.utils import (\n BlendMode,\n NumpyPadMode,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n first,\n optional_import,\n)\n\nnib, _ = optional_import(\"nibabel\")\n\n\ndef get_random_patch(\n dims: Sequence[int], patch_size: Sequence[int], rand_state: Optional[np.random.RandomState] = None\n) -> Tuple[slice, ...]:\n \"\"\"\n Returns a tuple of slices to define a random patch in an array of shape `dims` with size `patch_size` or the as\n close to it as possible within the given dimension. It is expected that `patch_size` is a valid patch for a source\n of shape `dims` as returned by `get_valid_patch_size`.\n\n Args:\n dims: shape of source array\n patch_size: shape of patch size to generate\n rand_state: a random state object to generate random numbers from\n\n Returns:\n (tuple of slice): a tuple of slice objects defining the patch\n \"\"\"\n\n # choose the minimal corner of the patch\n rand_int = np.random.randint if rand_state is None else rand_state.randint\n min_corner = tuple(rand_int(0, ms - ps + 1) if ms > ps else 0 for ms, ps in zip(dims, patch_size))\n\n # create the slices for each dimension which define the patch in the source array\n return tuple(slice(mc, mc + ps) for mc, ps in zip(min_corner, patch_size))\n\n\ndef iter_patch_slices(\n dims: Sequence[int], patch_size: Union[Sequence[int], int], start_pos: Sequence[int] = ()\n) -> Generator[Tuple[slice, ...], None, None]:\n \"\"\"\n Yield successive tuples of slices defining patches of size `patch_size` from an array of dimensions `dims`. The\n iteration starts from position `start_pos` in the array, or starting at the origin if this isn't provided. 
Each\n patch is chosen in a contiguous grid using a first dimension as least significant ordering.\n\n Args:\n dims: dimensions of array to iterate over\n patch_size: size of patches to generate slices for, 0 or None selects whole dimension\n start_pos: starting position in the array, default is 0 for each dimension\n\n Yields:\n Tuples of slice objects defining each patch\n \"\"\"\n\n # ensure patchSize and startPos are the right length\n ndim = len(dims)\n patch_size_ = get_valid_patch_size(dims, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n\n # collect the ranges to step over each dimension\n ranges = tuple(starmap(range, zip(start_pos, dims, patch_size_)))\n\n # choose patches by applying product to the ranges\n for position in product(*ranges[::-1]): # reverse ranges order to iterate in index order\n yield tuple(slice(s, s + p) for s, p in zip(position[::-1], patch_size_))\n\n\ndef dense_patch_slices(\n image_size: Sequence[int],\n patch_size: Sequence[int],\n scan_interval: Sequence[int],\n) -> List[Tuple[slice, ...]]:\n \"\"\"\n Enumerate all slices defining 2D/3D patches of size `patch_size` from an `image_size` input image.\n\n Args:\n image_size: dimensions of image to iterate over\n patch_size: size of patches to generate slices\n scan_interval: dense patch sampling interval\n\n Raises:\n ValueError: When ``image_size`` length is not one of [2, 3].\n\n Returns:\n a list of slice objects defining each patch\n\n \"\"\"\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(f\"Unsupported image_size length: {len(image_size)}, available options are [2, 3]\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = list()\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1)\n\n slices: List[Tuple[slice, ...]] = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices\n\n\ndef iter_patch(\n arr: np.ndarray,\n patch_size: Union[Sequence[int], int] = 0,\n start_pos: Sequence[int] = (),\n copy_back: bool = True,\n mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP,\n **pad_opts: Dict,\n) -> Generator[np.ndarray, None, None]:\n \"\"\"\n Yield successive patches from `arr` of size `patch_size`. 
The iteration can start from position `start_pos` in `arr`\n but drawing from a padded array extended by the `patch_size` in each dimension (so these coordinates can be negative\n to start in the padded region). If `copy_back` is True the values from each patch are written back to `arr`.\n\n Args:\n arr: array to iterate over\n patch_size: size of patches to generate slices for, 0 or None selects whole dimension\n start_pos: starting position in the array, default is 0 for each dimension\n copy_back: if True data from the yielded patches is copied back to `arr` once the generator completes\n mode: {``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``, ``\"mean\"``,\n ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n One of the listed string values or a user supplied function. Defaults to ``\"wrap\"``.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n pad_opts: padding options, see `numpy.pad`\n\n Yields:\n Patches of array data from `arr` which are views into a padded array which can be modified, if `copy_back` is\n True these changes will be reflected in `arr` once the iteration completes.\n \"\"\"\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # pad image by maximum values needed to ensure patches are taken from inside an image\n arrpad = np.pad(arr, tuple((p, p) for p in patch_size_), NumpyPadMode(mode).value, **pad_opts)\n\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, patch_size_))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, patch_size_))\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded):\n yield arrpad[slices]\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(patch_size_, arr.shape))\n arr[...] = arrpad[slices]\n\n\ndef get_valid_patch_size(image_size: Sequence[int], patch_size: Union[Sequence[int], int]) -> Tuple[int, ...]:\n \"\"\"\n Given an image of dimensions `image_size`, return a patch size tuple taking the dimension from `patch_size` if this is\n not 0/None. Otherwise, or if `patch_size` is shorter than `image_size`, the dimension from `image_size` is taken. This ensures\n the returned patch size is within the bounds of `image_size`. 
If `patch_size` is a single number this is interpreted as a\n patch of the same dimensionality of `image_size` with that size in each dimension.\n \"\"\"\n ndim = len(image_size)\n patch_size_ = ensure_tuple_size(patch_size, ndim)\n\n # ensure patch size dimensions are not larger than image dimension, if a dimension is None or 0 use whole dimension\n return tuple(min(ms, ps or ms) for ms, ps in zip(image_size, patch_size_))\n\n\ndef list_data_collate(batch: Sequence):\n \"\"\"\n Enhancement for PyTorch DataLoader default collate.\n If dataset already returns a list of batch data that generated in transforms, need to merge all data to 1 list.\n Then it's same as the default collate behavior.\n\n Note:\n Need to use this collate if apply some transforms that can generate batch data.\n\n \"\"\"\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n return default_collate(data)\n\n\ndef worker_init_fn(worker_id: int) -> None:\n \"\"\"\n Callback function for PyTorch DataLoader `worker_init_fn`.\n It can set different random seed for the transforms in different workers.\n\n \"\"\"\n worker_info = torch.utils.data.get_worker_info()\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))\n\n\ndef correct_nifti_header_if_necessary(img_nii):\n \"\"\"\n Check nifti object header's format, update the header if needed.\n In the updated image pixdim matches the affine.\n\n Args:\n img_nii: nifti image object\n \"\"\"\n dim = img_nii.header[\"dim\"][0]\n if dim >= 5:\n return img_nii # do nothing for high-dimensional array\n # check that affine matches zooms\n pixdim = np.asarray(img_nii.header.get_zooms())[:dim]\n norm_affine = np.sqrt(np.sum(np.square(img_nii.affine[:dim, :dim]), 0))\n if np.allclose(pixdim, norm_affine):\n return img_nii\n if hasattr(img_nii, \"get_sform\"):\n return rectify_header_sform_qform(img_nii)\n return img_nii\n\n\ndef rectify_header_sform_qform(img_nii):\n \"\"\"\n Look at the sform and qform of the nifti object and correct it if any\n incompatibilities with pixel dimensions\n\n Adapted from https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/io/misc_io.py\n\n Args:\n img_nii: nifti image object\n \"\"\"\n d = img_nii.header[\"dim\"][0]\n pixdim = np.asarray(img_nii.header.get_zooms())[:d]\n sform, qform = img_nii.get_sform(), img_nii.get_qform()\n norm_sform = np.sqrt(np.sum(np.square(sform[:d, :d]), 0))\n norm_qform = np.sqrt(np.sum(np.square(qform[:d, :d]), 0))\n sform_mismatch = not np.allclose(norm_sform, pixdim)\n qform_mismatch = not np.allclose(norm_qform, pixdim)\n\n if img_nii.header[\"sform_code\"] != 0:\n if not sform_mismatch:\n return img_nii\n if not qform_mismatch:\n img_nii.set_sform(img_nii.get_qform())\n return img_nii\n if img_nii.header[\"qform_code\"] != 0:\n if not qform_mismatch:\n return img_nii\n if not sform_mismatch:\n img_nii.set_qform(img_nii.get_sform())\n return img_nii\n\n norm = np.sqrt(np.sum(np.square(img_nii.affine[:d, :d]), 0))\n warnings.warn(f\"Modifying image pixdim from {pixdim} to {norm}\")\n\n img_nii.header.set_zooms(norm)\n return img_nii\n\n\ndef zoom_affine(affine: np.ndarray, scale: Sequence[float], diagonal: bool = True) -> np.ndarray:\n \"\"\"\n To make column norm of `affine` the same as `scale`. 
If diagonal is False,\n returns an affine that combines orthogonal rotation and the new scale.\n This is done by first decomposing `affine`, then setting the zoom factors to\n `scale`, and composing a new affine; the shearing factors are removed. If\n diagonal is True, returns a diagonal matrix, the scaling factors are set\n to the diagonal elements. This function always return an affine with zero\n translations.\n\n Args:\n affine (nxn matrix): a square matrix.\n scale: new scaling factor along each dimension.\n diagonal: whether to return a diagonal scaling matrix.\n Defaults to True.\n\n Raises:\n ValueError: When ``affine`` is not a square matrix.\n ValueError: When ``scale`` contains a nonpositive scalar.\n\n Returns:\n the updated `n x n` affine.\n\n \"\"\"\n\n affine = np.array(affine, dtype=float, copy=True)\n if len(affine) != len(affine[0]):\n raise ValueError(f\"affine must be n x n, got {len(affine)} x {len(affine[0])}.\")\n scale_np = np.array(scale, dtype=float, copy=True)\n if np.any(scale_np <= 0):\n raise ValueError(\"scale must contain only positive numbers.\")\n d = len(affine) - 1\n if len(scale_np) < d: # defaults based on affine\n norm = np.sqrt(np.sum(np.square(affine), 0))[:-1]\n scale_np = np.append(scale_np, norm[len(scale_np) :])\n scale_np = scale_np[:d]\n scale_np[scale_np == 0] = 1.0\n if diagonal:\n return np.diag(np.append(scale_np, [1.0]))\n rzs = affine[:-1, :-1] # rotation zoom scale\n zs = np.linalg.cholesky(rzs.T @ rzs).T\n rotation = rzs @ np.linalg.inv(zs)\n s = np.sign(np.diag(zs)) * np.abs(scale_np)\n # construct new affine with rotation and zoom\n new_affine = np.eye(len(affine))\n new_affine[:-1, :-1] = rotation @ np.diag(s)\n return new_affine\n\n\ndef compute_shape_offset(\n spatial_shape: np.ndarray, in_affine: np.ndarray, out_affine: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Given input and output affine, compute appropriate shapes\n in the output space based on the input array's shape.\n This function also returns the offset to put the shape\n in a good position with respect to the world coordinate system.\n\n Args:\n spatial_shape: input array's shape\n in_affine (matrix): 2D affine matrix\n out_affine (matrix): 2D affine matrix\n \"\"\"\n shape = np.array(spatial_shape, copy=True, dtype=float)\n sr = len(shape)\n in_affine = to_affine_nd(sr, in_affine)\n out_affine = to_affine_nd(sr, out_affine)\n in_coords = [(0.0, dim - 1.0) for dim in shape]\n corners = np.asarray(np.meshgrid(*in_coords, indexing=\"ij\")).reshape((len(shape), -1))\n corners = np.concatenate((corners, np.ones_like(corners[:1])))\n corners = in_affine @ corners\n corners_out = np.linalg.inv(out_affine) @ corners\n corners_out = corners_out[:-1] / corners_out[-1]\n out_shape = np.round(corners_out.ptp(axis=1) + 1.0)\n if np.allclose(nib.io_orientation(in_affine), nib.io_orientation(out_affine)):\n # same orientation, get translate from the origin\n offset = in_affine @ ([0] * sr + [1])\n offset = offset[:-1] / offset[-1]\n else:\n # different orientation, the min is the origin\n corners = corners[:-1] / corners[-1]\n offset = np.min(corners, 1)\n return out_shape.astype(int), offset\n\n\ndef to_affine_nd(r: Union[np.ndarray, int], affine: np.ndarray) -> np.ndarray:\n \"\"\"\n Using elements from affine, to create a new affine matrix by\n assigning the rotation/zoom/scaling matrix and the translation vector.\n\n when ``r`` is an integer, output is an (r+1)x(r+1) matrix,\n where the top left kxk elements are copied from ``affine``,\n the last column of the 
output affine is copied from ``affine``'s last column.\n `k` is determined by `min(r, len(affine) - 1)`.\n\n when ``r`` is an affine matrix, the output has the same as ``r``,\n the top left kxk elements are copied from ``affine``,\n the last column of the output affine is copied from ``affine``'s last column.\n `k` is determined by `min(len(r) - 1, len(affine) - 1)`.\n\n Args:\n r (int or matrix): number of spatial dimensions or an output affine to be filled.\n affine (matrix): 2D affine matrix\n\n Raises:\n ValueError: When ``affine`` dimensions is not 2.\n ValueError: When ``r`` is nonpositive.\n\n Returns:\n an (r+1) x (r+1) matrix\n\n \"\"\"\n affine_np = np.array(affine, dtype=np.float64)\n if affine_np.ndim != 2:\n raise ValueError(f\"affine must have 2 dimensions, got {affine_np.ndim}.\")\n new_affine = np.array(r, dtype=np.float64, copy=True)\n if new_affine.ndim == 0:\n sr = new_affine.astype(int)\n if not np.isfinite(sr) or sr < 0:\n raise ValueError(f\"r must be positive, got {sr}.\")\n new_affine = np.eye(sr + 1, dtype=np.float64)\n d = max(min(len(new_affine) - 1, len(affine_np) - 1), 1)\n new_affine[:d, :d] = affine_np[:d, :d]\n if d > 1:\n new_affine[:d, -1] = affine_np[:d, -1]\n return new_affine\n\n\ndef create_file_basename(\n postfix: str,\n input_file_name: str,\n folder_path: str,\n data_root_dir: str = \"\",\n) -> str:\n \"\"\"\n Utility function to create the path to the output file based on the input\n filename (extension is added by lib level writer before writing the file)\n\n Args:\n postfix: output name's postfix\n input_file_name: path to the input image file.\n folder_path: path for the output file\n data_root_dir: if not empty, it specifies the beginning parts of the input file's\n absolute path. This is used to compute `input_file_rel_path`, the relative path to the file from\n `data_root_dir` to preserve folder structure when saving in case there are files in different\n folders with the same file names.\n \"\"\"\n\n # get the filename and directory\n filedir, filename = os.path.split(input_file_name)\n # remove exntension\n filename, ext = os.path.splitext(filename)\n if ext == \".gz\":\n filename, ext = os.path.splitext(filename)\n # use data_root_dir to find relative path to file\n filedir_rel_path = \"\"\n if data_root_dir:\n filedir_rel_path = os.path.relpath(filedir, data_root_dir)\n\n # sub-folder path will be original name without the extension\n subfolder_path = os.path.join(folder_path, filedir_rel_path, filename)\n if not os.path.exists(subfolder_path):\n os.makedirs(subfolder_path)\n\n # add the sub-folder plus the postfix name to become the file basename in the output path\n return os.path.join(subfolder_path, filename + \"_\" + postfix)\n\n\ndef compute_importance_map(\n patch_size: Tuple[int, ...],\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n device: Optional[torch.device] = None,\n) -> torch.Tensor:\n \"\"\"Get importance map for different weight modes.\n\n Args:\n patch_size: Size of the required importance map. This should be either H, W [,D].\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: Sigma_scale to calculate sigma for each dimension\n (sigma = sigma_scale * dim_size). 
Used for gaussian mode only.\n device: Device to put importance map on.\n\n Raises:\n ValueError: When ``mode`` is not one of [\"constant\", \"gaussian\"].\n\n Returns:\n Tensor of size patch_size.\n\n \"\"\"\n mode = BlendMode(mode)\n if mode == BlendMode.CONSTANT:\n importance_map = torch.ones(patch_size, device=device).float()\n elif mode == BlendMode.GAUSSIAN:\n center_coords = [i // 2 for i in patch_size]\n sigma_scale = ensure_tuple_rep(sigma_scale, len(patch_size))\n sigmas = [i * sigma_s for i, sigma_s in zip(patch_size, sigma_scale)]\n\n importance_map = torch.zeros(patch_size, device=device)\n importance_map[tuple(center_coords)] = 1\n pt_gaussian = GaussianFilter(len(patch_size), sigmas).to(device=device, dtype=torch.float)\n importance_map = pt_gaussian(importance_map.unsqueeze(0).unsqueeze(0))\n importance_map = importance_map.squeeze(0).squeeze(0)\n importance_map = importance_map / torch.max(importance_map)\n importance_map = importance_map.float()\n\n # importance_map cannot be 0, otherwise we may end up with nans!\n min_non_zero = importance_map[importance_map != 0].min().item()\n importance_map = torch.clamp(importance_map, min=min_non_zero)\n else:\n raise ValueError(\n f\"Unsupported mode: {mode}, available options are [{BlendMode.CONSTANT}, {BlendMode.CONSTANT}].\"\n )\n\n return importance_map\n\n\ndef is_supported_format(filename: Union[Sequence[str], str], suffixes: Sequence[str]) -> bool:\n \"\"\"\n Verify whether the specified file or files format match supported suffixes.\n If supported suffixes is None, skip the verification and return True.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n suffixes: all the supported image suffixes of current reader, must be a list of lower case suffixes.\n\n \"\"\"\n filenames: Sequence[str] = ensure_tuple(filename)\n for name in filenames:\n tokens: Sequence[str] = PurePath(name).suffixes\n if len(tokens) == 0 or not any((\".\" + s.lower()) in \"\".join(tokens) for s in suffixes):\n return False\n\n return True\n"
] | [
[
"numpy.diag",
"torch.max",
"torch.zeros",
"numpy.any",
"numpy.square",
"numpy.ones_like",
"numpy.allclose",
"torch.ones",
"numpy.eye",
"torch.utils.data.get_worker_info",
"torch.utils.data._utils.collate.default_collate",
"numpy.min",
"numpy.linalg.inv",
"numpy.append",
"numpy.linalg.cholesky",
"numpy.meshgrid",
"numpy.array",
"numpy.abs",
"numpy.isfinite",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
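A short usage sketch for the patch helpers defined above, assuming monai is installed and this module is importable at that path; a patch size of 0 requests the whole dimension:

    import numpy as np
    from monai.data.utils import get_valid_patch_size, get_random_patch

    img = np.arange(100).reshape(10, 10)
    patch_size = get_valid_patch_size(img.shape, (4, 0))   # -> (4, 10)
    slices = get_random_patch(img.shape, patch_size)       # random corner
    print(img[slices].shape)                               # (4, 10)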
Haddy1/glacier_bayes_unet | [
"dad1bf79e5384404c2de9765027e499487dbb24d"
] | [
"utils/metrics.py"
] | [
"import numpy as np\nimport tensorflow.keras.backend as K\nimport tensorflow as tf\nfrom scipy.spatial.distance import cdist\n\ndef dice_coefficient(u,v):\n \"\"\"\n For binary vectors the Dice cooefficient can be written as\n 2 * |u * v| / (|u**2| + |v**2|)\n\n | u * v | gives intersecting set\n |u**2|, |v**2| number of (true) elements in set\n\n :param u: binary vector\n :param v: binary vector of same length as u\n :return: dice coefficient\n \"\"\"\n c_uv = np.sum(u*v)\n if c_uv == 0:\n return 0\n else:\n c_u = np.sum(u**2)\n c_v = np.sum(v**2)\n return 2 * c_uv / (c_u + c_v)\n\ndef dice_coefficient_cutoff(gt, pred, cutoff):\n \"\"\"\n Binarizes pred using cutoff as threshold\n and computes dice coefficent for boolean vectors\n Used for threshold optimization\n\n :param gt: binary vector\n :param pred: vector\n :param cutoff: cutoff threshold in same range as pred\n :return dice coefficient\n \"\"\"\n pred_bin = (pred >= cutoff).astype(int)\n return dice_coefficient(gt, pred_bin)\n\ndef IOU(y_true, y_pred):\n \"\"\"\n Returns Intersection over Union (IOU)\n :param y_true: binary vector of ground truth\n :param y_pred: binary vector prediction, same length as u\n :return: IOU\n \"\"\"\n\n intersection = np.sum(y_true * y_pred)\n union = np.sum(y_true.astype(np.bool) | y_pred.astype(np.bool))\n return intersection / union\n\n\ndef specificity(y_true, y_pred):\n \"\"\"\n Returns specificity\n\n :param y_true: binary vector of ground truth\n :param y_pred: binary vector of prediction, same length as u\n :return: specificity value\n \"\"\"\n neg_y_true = 1 - y_true\n neg_y_pred = 1 - y_pred\n fp = np.sum(neg_y_true * y_pred)\n tn = np.sum(neg_y_true * neg_y_pred)\n result = tn / (tn + fp + K.epsilon())\n return result\n\ndef euclidian_tf(y_true, y_pred):\n \"\"\"\n :param y_true: binary vector of ground truth\n :param y_pred: binary vector of prediction, same length as u\n :return: euclidian distance between y_true and y_pred\n \"\"\"\n return tf.reduce_sum((y_true - y_pred)**2)\n\ndef line_graph(y_true, y_pred):\n gt_line = np.where(y_true)\n pred_line = np.where(y_pred)\n gt_y_start = np.argmin(gt_line[0])\n gt_start = (gt_y_start, np.argmin(gt_line[1][gt_y_start]))\n gt_dist = cdist(gt_line, gt_line)\n\n\ndef line_accuracy(y_true, y_pred):\n intersection = np.sum(y_true == y_pred)\n union = np.sum(y_true)\n if union == 0:\n if intersection == 0:\n return 1\n else:\n return 0\n else:\n return intersection / union\n"
] | [
[
"tensorflow.reduce_sum",
"scipy.spatial.distance.cdist",
"numpy.argmin",
"tensorflow.keras.backend.epsilon",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
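A worked example of dice_coefficient() above for two binary masks, 2|u∩v| / (|u|² + |v|²):

    import numpy as np

    u = np.array([1, 1, 0, 0])
    v = np.array([1, 0, 1, 0])
    dice = 2 * np.sum(u * v) / (np.sum(u ** 2) + np.sum(v ** 2))
    print(dice)  # 0.5 -> one overlapping element, two elements per mask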
kulkarnisp/data-driven-discretisation | [
"1819bfeb934978abd31e66fb096741029ea8b901"
] | [
"pydisc/loader.py"
] | [
"import os\r\n# import xarray\r\nimport scipy.io\r\nimport torch\r\nfrom torch.utils.data import Dataset,DataLoader,TensorDataset\r\n\r\ndatapath__ = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'datasets'))\r\n\r\n# matdata1 = scipy.io.loadmat('up.mat')\r\nmatdata2 = scipy.io.loadmat(os.path.join(datapath__,'van.mat'))\r\n# upwind=torch.tensor(matdata1['out'])\r\nvanleer=torch.tensor(matdata2['out'])\r\n# upwind.squeeze_(1).squeeze_(-1)\r\nvanler_data = torch.squeeze(vanleer,-1)\r\n\r\nclass Simulation:\r\n def __init__(self,array):\r\n self.dat = array\r\n self.Length = 32.0\r\n self.Time = 1.0\r\n nt,nf,nx = array.shape\r\n self.nT = nt\r\n self.nL = nx\r\n self._caldt()\r\n\r\n def _caldx(self):\r\n self.dx = self.Length/self.nL\r\n self.dt = self.Time/self.nT\r\n self.CFLd = self.dx/self.dt**2\r\n \r\n def _caldt(self):\r\n self.dx = self.Length/self.nL\r\n self.CFLa = 0.5\r\n self.dt = self.dx*self.CFLa\r\n\r\n\r\ndef get_loader(x,y,BATCH_SIZE=64):\r\n tset = TensorDataset(x,y)\r\n dloader = DataLoader(tset, batch_size=BATCH_SIZE, shuffle=False,drop_last=True)\r\n return dloader\r\nget_train = lambda x : (x[:-1],x[1:])\r\n\r\n\r\n\r\n\r\n\r\n# def load_ith_init_condition(i):\r\n# van_squeeze=sqzfinearray(van_squeeze_all,i)\r\n\r\n# vanleer_ground_coarse=regrid(van_squeeze,8)\r\n\r\n# dat=vanleer_ground_coarse.unsqueeze(1)\r\n\r\n# sim_details = Simulation(dat.shape)\r\n\r\n# num_steps=1\r\n# get_train = lambda x : (x[:-num_steps],x[num_steps:])\r\n# x,y = get_train(dat.data) \r\n# BATCH_SIZE = x.shape[0]\r\n# tset = TensorDataset(x,y)\r\n# dloader = DataLoader(tset, batch_size=BATCH_SIZE, shuffle=False,drop_last=True)\r\n \r\n# return dloader,sim_details\r\n\r\n"
] | [
[
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.squeeze",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
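get_train/get_loader above pair each simulation snapshot with its successor so a model can learn a one-step update rule. A standalone sketch with random data standing in for the van.mat trajectory:

    import torch
    from torch.utils.data import TensorDataset, DataLoader

    traj = torch.randn(100, 1, 32)       # (time steps, fields, grid points)
    x, y = traj[:-1], traj[1:]           # state at t -> state at t + 1
    loader = DataLoader(TensorDataset(x, y), batch_size=16,
                        shuffle=False, drop_last=True)
    xb, yb = next(iter(loader))
    print(xb.shape, yb.shape)            # torch.Size([16, 1, 32]) twice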
ericangelokim/sagemaker-inference-toolkit | [
"24db871b1b193ac1a924c21be8c3ec48853b3263"
] | [
"test/unit/test_encoder.py"
] | [
"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the 'license' file accompanying this file. This file is\n# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom mock import Mock, patch\nimport numpy as np\nimport pytest\nfrom six import BytesIO\n\nfrom sagemaker_inference import content_types, encoder, errors\n\n\[email protected]('target', ([42, 6, 9], [42., 6., 9.], ['42', '6', '9'], [u'42', u'6', u'9'], {42: {'6': 9.}}))\ndef test_array_to_npy(target):\n input_data = np.array(target)\n\n actual = encoder._array_to_npy(input_data)\n\n np.testing.assert_equal(np.load(BytesIO(actual), allow_pickle=True), np.array(target))\n\n actual = encoder._array_to_npy(target)\n\n np.testing.assert_equal(np.load(BytesIO(actual), allow_pickle=True), np.array(target))\n\n\[email protected](\n 'target, expected', [([42, 6, 9], '[42, 6, 9]'),\n ([42., 6., 9.], '[42.0, 6.0, 9.0]'),\n (['42', '6', '9'], '[\"42\", \"6\", \"9\"]'),\n ({42: {'6': 9.}}, '{\"42\": {\"6\": 9.0}}')]\n)\ndef test_array_to_json(target, expected):\n actual = encoder._array_to_json(target)\n np.testing.assert_equal(actual, expected)\n\n actual = encoder._array_to_json(np.array(target))\n np.testing.assert_equal(actual, expected)\n\n\ndef test_array_to_json_exception():\n with pytest.raises(TypeError):\n encoder._array_to_json(lambda x: 3)\n\n\[email protected](\n 'target, expected', [([42, 6, 9], '42\\n6\\n9\\n'),\n ([42., 6., 9.], '42.0\\n6.0\\n9.0\\n'),\n (['42', '6', '9'], '42\\n6\\n9\\n')])\ndef test_array_to_csv(target, expected):\n actual = encoder._array_to_csv(target)\n np.testing.assert_equal(actual, expected)\n\n actual = encoder._array_to_csv(np.array(target))\n np.testing.assert_equal(actual, expected)\n\n\[email protected](\n 'content_type', [content_types.JSON, content_types.CSV, content_types.NPY]\n)\ndef test_encode(content_type):\n mock_encoder = Mock()\n with patch.dict(encoder._encoder_map, {content_type: mock_encoder}, clear=True):\n encoder.encode(42, content_type)\n\n mock_encoder.assert_called_once_with(42)\n\n\ndef test_encode_error():\n with pytest.raises(errors.UnsupportedFormatError):\n encoder.encode(42, content_types.OCTET_STREAM)\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
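The tests above rely on an NPY round trip: the encoder emits np.save bytes and the test loads them back. A minimal sketch of that round trip with plain numpy:

    import numpy as np
    from io import BytesIO

    buf = BytesIO()
    np.save(buf, np.array([42, 6, 9]))   # the bytes _array_to_npy produces
    payload = buf.getvalue()
    restored = np.load(BytesIO(payload), allow_pickle=True)
    print(restored)  # [42  6  9]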
QiangZiBro/compass | [
"30c3d89a69e496185488dd5f596de517f59624fc"
] | [
"utils/geometry.py"
] | [
"import open3d as o3d\nfrom spherical_voxel import spherical_voxel as sv\nfrom lie_learn.spaces import S2\nfrom typing import Tuple\nimport numpy as np\nimport math\nimport copy\nimport torch\n\n\ndef get_rotation_matrix(alfa, beta, gamma, hom_coord=False):\n \"\"\"\n Create a rotation matrix with an optional fourth homogeneous coordinate\n\n :param a, b, c: ZYZ-Euler angles\n \"\"\"\n\n def z(a):\n return np.array([[np.cos(a), np.sin(a), 0, 0],\n [-np.sin(a), np.cos(a), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n def y(a):\n return np.array([[np.cos(a), 0, -np.sin(a), 0],\n [0, 1, 0, 0],\n [np.sin(a), 0, np.cos(a), 0],\n [0, 0, 0, 1]])\n\n r = z(gamma).dot(y(beta)).dot(z(alfa)) # pylint: disable=E1101\n\n if hom_coord:\n return r\n else:\n return r[:3, :3]\n\n\ndef get_random_rotation_matrix(hom_coord=False):\n\n alfa = np.random.rand() * 2 * np.pi\n beta = np.random.rand() * 2 - 1\n gamma = np.random.rand() * 2 * np.pi\n\n mat = get_rotation_matrix(alfa, np.arccos(beta), gamma, hom_coord)\n\n return mat\n\n\ndef spherical_voxel_optimized(points: np.ndarray, size_bandwidth: int, size_radial_divisions: int,\n radius_support: float, do_random_sampling: bool, num_random_points: int) \\\n -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute spherical voxel using the C++ code.\n\n Compute Spherical Voxel signal as defined in:\n Pointwise Rotation-Invariant Network with Adaptive Sampling and 3D Spherical Voxel Convolution.\n Yang You, Yujing Lou, Qi Liu, Yu-Wing Tai, Weiming Wang, Lizhuang Ma and Cewu Lu.\n AAAI 2020.\n\n :param points: the points to convert.\n :param size_bandwidth: alpha and beta bandwidth.\n :param size_radial_divisions: the number of bins along radial dimension.\n :param radius_support: the radius used to compute the points in the support.\n :param do_random_sampling: if true a subset of random points will be used to compute the spherical voxel.\n :param num_random_points: the number of points to keep if do_random_sampling is true.\n\n :return: A tuple containing:\n The spherical voxel, shape(size_radial_divisions, 2 * size_bandwidth, 2 * size_bandwidth).\n The points used to compute the signal normalized according the the farthest point.\n \"\"\"\n if do_random_sampling:\n min_limit = 1 if points.shape[0] > 1 else 0\n indices_random = np.random.randint(min_limit, points.shape[0], num_random_points)\n points = points[indices_random]\n\n pts_norm = np.linalg.norm(points, axis=1)\n # Scale points to fit unit sphere\n pts_normed = points / pts_norm[:, None]\n pts_normed = np.clip(pts_normed, -1, 1)\n\n pts_s2_coord = S2.change_coordinates(pts_normed, p_from='C', p_to='S')\n # Convert to spherical voxel indices\n pts_s2_coord[:, 0] *= 2 * size_bandwidth / np.pi # [0, pi]\n pts_s2_coord[:, 1] *= size_bandwidth / np.pi\n pts_s2_coord[:, 1][pts_s2_coord[:, 1] < 0] += 2 * size_bandwidth\n\n # Adaptive sampling factor\n daas_weights = np.sin(np.pi * (2 * np.arange(2 * size_bandwidth) + 1) / 4 / size_bandwidth).astype(np.float32)\n voxel = np.asarray(sv.compute(pts_on_s2=pts_s2_coord,\n pts_norm=pts_norm,\n size_bandwidth=size_bandwidth,\n size_radial_divisions=size_radial_divisions,\n radius_support=radius_support,\n daas_weights=daas_weights))\n pts_normed = points / np.max(pts_norm)\n return voxel.astype(np.float32), pts_normed.astype(np.float32)\n\n\ndef lrf_repeatability(lrfs_src, lrfs_trg, mat_from_src_to_trg, th_cosine=0.97):\n \"\"\"\n Compute local reference frame repeatability considering the cosine of angles between x and z axes.\n Two frames are repeatable if the cosines 
between x and z axes are greater than th_cosine\n :param lrfs_src: local reference frames on src (axes on COLUMNS)\n :param lrfs_trg: local reference frames on trg (axes on COLUMNS)\n :param mat_from_src_to_trg: matrices from src to trg\n :param th_cosine: threshold on cosine to consider the axes repeatable, default 0.97\n :return: an array with 0 and 1 indicating whether the lrf is repeatable or not\n \"\"\"\n\n lrfs_src_in_trg = mat_from_src_to_trg @ lrfs_src\n\n muls = np.multiply(lrfs_src_in_trg, lrfs_trg)\n\n dots = np.sum(muls, axis=1)\n\n dots_x = dots[:, 0]\n dots_z = dots[:, 2]\n\n positives_x = dots_x >= th_cosine\n positives_z = dots_z >= th_cosine\n\n res = positives_x * positives_z\n\n return 0 + res\n\n\ndef get_dimensions(cloud):\n min = np.min(cloud, axis=0)\n max = np.max(cloud, axis=0)\n\n return (max - min)\n\n\ndef get_max_radius(cloud):\n bb_cloud = get_dimensions(cloud)\n return np.max(bb_cloud)*0.5\n\n\ndef get_overlapping_area(cloud_a, cloud_b, mat_pose_a, mat_pose_b, min_euclidean_distance_to_consider_kp_in_fragment):\n\n cloud_a_in_gt = copy.deepcopy(cloud_a)\n cloud_b_in_gt = copy.deepcopy(cloud_b)\n\n cloud_a_in_gt.transform(mat_pose_a)\n cloud_b_in_gt.transform(mat_pose_b)\n\n kdtree_b = o3d.geometry.KDTreeFlann(cloud_b_in_gt)\n kdtree_a = o3d.geometry.KDTreeFlann(cloud_a_in_gt)\n\n dists = []\n\n for i in range(0, np.array(cloud_a_in_gt.points).shape[0]):\n [_, idx_a, dist_a] = kdtree_b.search_knn_vector_3d(cloud_a_in_gt.points[i], 1)\n [_, idx_b, _] = kdtree_a.search_knn_vector_3d(cloud_b_in_gt.points[idx_a[0]], 1)\n\n if idx_b[0] == i and dist_a[0] < min_euclidean_distance_to_consider_kp_in_fragment:\n dists.append(dist_a[0])\n\n overlap_area = np.array(dists).size / np.array(cloud_a_in_gt.points).shape[0]\n\n return overlap_area\n\n\ndef compute_nearest_search_intersection(cloud_src, cloud_trg, min_distance):\n\n indices_trg = []\n indices_src = []\n\n kdtree = o3d.geometry.KDTreeFlann(cloud_trg)\n\n for idx_src, point_src in enumerate(cloud_src.points):\n [k, idx, distance] = kdtree.search_knn_vector_3d(point_src, 1)\n\n if np.sqrt(distance[0]) <= min_distance:\n indices_trg.append(idx[0])\n indices_src.append(idx_src)\n\n cloud_points_on_trg = o3d.geometry.PointCloud()\n cloud_points_on_trg.points = o3d.utility.Vector3dVector(np.asarray(cloud_trg.points)[indices_trg])\n\n return cloud_points_on_trg, np.asarray(indices_trg), np.asarray(indices_src)\n\n\n\n\n# What follows is taken from https://www.lfd.uci.edu/~gohlke/code/transformations.py.html\n# and is subject to the following conditions\n\n# Copyright (c) 2006-2019, Christoph Gohlke\n# Copyright (c) 2006-2019, The Regents of the University of California\n# Produced at the Laboratory for Fluorescence Dynamics\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS 
IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\ndef unit_vector(data, axis=None, out=None):\n \"\"\"Return ndarray normalized by length, i.e. Euclidean norm, along axis.\n\n >>> v0 = np.random.random(3)\n >>> v1 = unit_vector(v0)\n >>> np.allclose(v1, v0 / np.linalg.norm(v0))\n True\n >>> v0 = np.random.rand(5, 4, 3)\n >>> v1 = unit_vector(v0, axis=-1)\n >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)\n >>> np.allclose(v1, v2)\n True\n >>> v1 = unit_vector(v0, axis=1)\n >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)\n >>> np.allclose(v1, v2)\n True\n >>> v1 = np.empty((5, 4, 3))\n >>> unit_vector(v0, axis=1, out=v1)\n >>> np.allclose(v1, v2)\n True\n >>> list(unit_vector([]))\n []\n >>> list(unit_vector([1]))\n [1.0]\n\n \"\"\"\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = np.array(data, copy=False)\n data = out\n length = np.atleast_1d(np.sum(data*data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data\n\ndef rotation_matrix(angle, direction, point=None):\n \"\"\"Return matrix to rotate about axis defined by point and direction.\n\n >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])\n >>> np.allclose(np.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])\n True\n >>> angle = (random.random() - 0.5) * (2*math.pi)\n >>> direc = np.random.random(3) - 0.5\n >>> point = np.random.random(3) - 0.5\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)\n >>> is_same_transform(R0, R1)\n True\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> R1 = rotation_matrix(-angle, -direc, point)\n >>> is_same_transform(R0, R1)\n True\n >>> I = np.identity(4, np.float64)\n >>> np.allclose(I, rotation_matrix(math.pi*2, direc))\n True\n >>> np.allclose(2, np.trace(rotation_matrix(math.pi/2,\n ... 
direc, point)))\n True\n\n \"\"\"\n sina = math.sin(angle)\n cosa = math.cos(angle)\n direction = unit_vector(direction[:3])\n # rotation matrix around unit vector\n R = np.diag([cosa, cosa, cosa])\n R += np.outer(direction, direction) * (1.0 - cosa)\n direction *= sina\n R += np.array([[ 0.0, -direction[2], direction[1]],\n [ direction[2], 0.0, -direction[0]],\n [-direction[1], direction[0], 0.0]])\n M = np.identity(4)\n M[:3, :3] = R\n if point is not None:\n # rotation not around origin\n point = np.array(point[:3], dtype=np.float64, copy=False)\n M[:3, 3] = point - np.dot(R, point)\n return M\n\n\ndef rotation_from_matrix(matrix):\n \"\"\"Return rotation angle and axis from rotation matrix.\n\n >>> angle = (random.random() - 0.5) * (2*math.pi)\n >>> direc = np.random.random(3) - 0.5\n >>> point = np.random.random(3) - 0.5\n >>> R0 = rotation_matrix(angle, direc, point)\n >>> angle, direc, point = rotation_from_matrix(R0)\n >>> R1 = rotation_matrix(angle, direc, point)\n >>> is_same_transform(R0, R1)\n True\n\n \"\"\"\n R = np.array(matrix, dtype=np.float64, copy=False)\n R33 = R[:3, :3]\n # direction: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, W = np.linalg.eig(R33.T)\n i = np.where(abs(np.real(w) - 1.0) < 1e-4)[0] # 1e-8 was apparently too strict (Federico Stella)\n if not len(i):\n raise ValueError('no unit eigenvector corresponding to eigenvalue 1')\n direction = np.real(W[:, i[-1]]).squeeze()\n # point: unit eigenvector of R33 corresponding to eigenvalue of 1\n w, Q = np.linalg.eig(R)\n i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]\n if not len(i):\n raise ValueError('no unit eigenvector corresponding to eigenvalue 1')\n point = np.real(Q[:, i[-1]]).squeeze()\n point /= point[3]\n # rotation angle depending on direction\n cosa = (np.trace(R33) - 1.0) / 2.0\n if abs(direction[2]) > 1e-8:\n sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]\n elif abs(direction[1]) > 1e-8:\n sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]\n else:\n sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]\n angle = math.atan2(sina, cosa)\n return angle, direction, point\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.asarray",
"numpy.max",
"numpy.trace",
"numpy.random.randint",
"numpy.clip",
"numpy.linalg.eig",
"numpy.arange",
"numpy.sin",
"numpy.real",
"numpy.outer",
"numpy.multiply",
"numpy.min",
"numpy.arccos",
"numpy.identity",
"numpy.random.rand",
"numpy.array",
"numpy.sum",
"numpy.linalg.norm",
"numpy.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
digital-idiot/sbnet | [
"cf8ea06430c8d8c8d7c5af266a6f926fdde12312"
] | [
"sbnet_tensorflow/benchmark/tf_conv_dims_tests.py"
] | [
"\"\"\"\n\n Sparse Blocks Network\n Copyright (c) 2017, Uber Technologies, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\n\n#\n# Unit tests for tf_conv_dims.py\n#\nfrom __future__ import division, print_function, unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass CalcOutSizeTests(tf.test.TestCase):\n def _test_calc_out_size(self, in_size, ksize, stride, padding):\n from tf_conv_dims import calc_out_size_1d\n x = tf.ones([1, in_size, in_size, 1], dtype=tf.float32)\n w = tf.ones([ksize, ksize, 1, 1], dtype=tf.float32)\n y = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding)\n with self.test_session():\n out_size_exp = tf.shape(y)[1].eval()\n out_size_act = calc_out_size_1d(in_size, ksize, stride, padding).eval()\n self.assertEqual(out_size_act, out_size_exp)\n\n def test_calc_out_size(self):\n for insize in [6, 7, 10, 11]:\n for ksize in [1, 2, 3, 4, 7]:\n for stride in [1, 2, 3]:\n for padding in ['SAME', 'VALID']:\n if ksize <= insize:\n self._test_calc_out_size(insize, ksize, stride, padding)\n\n\nclass CalcOutSizeDeconvTests(tf.test.TestCase):\n def _test_calc_in_size(self, out_size, ksize, stride, padding):\n from tf_conv_dims import calc_out_size_1d_np\n w = tf.ones([ksize, ksize, 1, 1], dtype=tf.float32)\n in_size = calc_out_size_1d_np(out_size, ksize, 1 / float(stride), padding)\n x = tf.ones([1, in_size, in_size, 1], dtype=tf.float32)\n y = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding)\n with self.test_session():\n out_size_exp = tf.shape(y)[1].eval()\n self.assertEqual(out_size, out_size_exp)\n\n def test_calc_out_size(self):\n for insize in [6, 7, 10, 11]:\n for ksize in [1, 2, 3, 4, 7]:\n for stride in [1, 2, 3]:\n # Fractional stride methods only works for SAME.\n # Instead of calc out_size, maybe possible to have another function to calc\n # in_size\n for padding in ['SAME']:\n if ksize <= insize:\n self._test_calc_in_size(insize, ksize, stride, padding)\n\n\nclass CalcPaddingTests(tf.test.TestCase):\n def test_calc_padding(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 6, 1])\n p_exp = np.array([0, 1, 1, 1], dtype=np.int32)\n p = calc_padding_4d(tf.shape(x), [2, 3, 1, 1], [1, 1, 1, 1], 'SAME')\n p = tf.stack(p)\n with self.test_session():\n p_act = p.eval()\n np.testing.assert_array_equal(p_act, p_exp)\n\n def test_calc_padding_err_ksize_list(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 6, 1])\n err_raised = False\n try:\n calc_padding_4d(tf.shape(x), [2, 3, 1, 1, 1], [2, 1, 1, 1], 'SAME')\n except AssertionError as e:\n self.assertEqual(e.message, 'Expect `ksize` a list/tuple of length 4.')\n err_raised = True\n self.assertTrue(err_raised)\n\n def test_calc_padding_err_strides_list(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 6, 1])\n err_raised = False\n try:\n calc_padding_4d(tf.shape(x), [2, 3, 1, 1], [2, 1, 1, 1], 'SAME')\n except AssertionError as e:\n self.assertEqual(e.message, 'Expect first and last dimension of `strides` = 1.')\n err_raised = True\n 
self.assertTrue(err_raised)\n\n def test_calc_padding_err_strides_tensor(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 6, 1])\n err_raised = False\n p = calc_padding_4d(tf.shape(x), [2, 3, 1, 1], tf.constant(np.array([2, 1, 1, 1])), 'SAME')\n p = tf.stack(p)\n with self.test_session():\n try:\n p.eval()\n except tf.errors.InvalidArgumentError as e:\n self.assertTrue(\n e.message.startswith(\n 'assertion failed: [Expect first and last dimension of `strides` = 1.]'))\n err_raised = True\n\n self.assertTrue(err_raised)\n\n def test_calc_padding_valid(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 5, 1])\n p_exp = np.array([0, 0, 0, 0], dtype=np.int32)\n p = calc_padding_4d(tf.shape(x), [2, 3, 1, 1], [1, 1, 1, 1], 'VALID')\n p = tf.stack(p)\n with self.test_session():\n p_act = p.eval()\n np.testing.assert_array_equal(p_act, p_exp)\n\n def test_calc_padding_stride(self):\n from tf_conv_dims import calc_padding_4d\n x = tf.zeros([1, 5, 6, 1])\n p_exp = np.array([0, 1, 0, 1], dtype=np.int32)\n p = calc_padding_4d(tf.shape(x), [2, 3, 1, 1], [1, 2, 2, 1], 'SAME')\n p = tf.stack(p)\n with self.test_session():\n p_act = p.eval()\n np.testing.assert_array_equal(p_act, p_exp)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.ones",
"tensorflow.test.main",
"numpy.testing.assert_array_equal",
"numpy.array",
"tensorflow.nn.conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ahama92/outlier-detection | [
"60a723d07ea71ac312c474c007c20e6b46106d7b"
] | [
"test.py"
] | [
"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n# First set up the figure, the axis, and the plot element we want to animate\nfig = plt.figure()\nax = plt.axes(xlim=(0, 2), ylim=(-2, 2))\nline, = ax.plot([], [], lw=2)\n\n# initialization function: plot the background of each frame\ndef init():\n line.set_data([], [])\n return line,\n\n# animation function. This is called sequentially\ndef animate(i):\n x = np.linspace(0, 2, 1000)\n y = np.sin(2 * np.pi * (x - 0.01 * i))\n line.set_data(x, y)\n return line,\n\n# call the animator. blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=50, interval=20, blit=True)\n\n# save the animation as an mp4. This requires ffmpeg or mencoder to be\n# installed. The extra_args ensure that the x264 codec is used, so that\n# the video can be embedded in html5. You may need to adjust this for\n# your system: for more information, see\n# http://matplotlib.sourceforge.net/api/animation_api.html\nanim.save('test_animation.mp4', writer='ffmpeg', fps=30, extra_args=['-vcodec', 'libx264'])\n\nplt.show()"
] | [
[
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.axes",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ShichengChen/wavenet-generate-music | [
"cbba2264a83a44bfad964e3161769b5b5fa22536"
] | [
"readx.py"
] | [
"import datetime\n\nimport h5py\nimport numpy as np\nimport torch\nfrom torch.utils import data\nimport torch.nn.functional as F\nfrom transformData import mu_law_encode, mu_law_encode\n\nsampleSize = 16000\nsample_rate = 16000 # the length of audio for one second\n\n\nclass Dataset(data.Dataset):\n def __init__(self, listx, rootx,pad, transform=None):\n self.rootx = rootx\n self.listx = listx\n self.pad=int(pad)\n #self.device=device\n self.transform = transform\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.listx)\n\n def __getitem__(self, index):\n np.random.seed()\n namex = self.listx[index]\n\n h5f = h5py.File(self.rootx + str(namex) + '.h5', 'r')\n x = h5f['x'][:]\n\n factor1 = np.random.uniform(low=0.83, high=1.0)\n x = x*factor1\n\n x = mu_law_encode(x)\n\n x = torch.from_numpy(x.reshape(-1)).type(torch.LongTensor)\n #x = F.pad(y, (self.pad, self.pad), mode='constant', value=127)\n\n\n return namex,x.type(torch.LongTensor)\n\n\nclass RandomCrop(object):\n def __init__(self, pad,output_size=sample_rate):\n self.output_size = output_size\n self.pad=pad\n\n def __call__(self, sample):\n #print('randomcrop',np.random.get_state()[1][0])\n np.random.seed(datetime.datetime.now().second + datetime.datetime.now().microsecond)\n x, y = sample['x'], sample['y']\n shrink = 0\n #startx = np.random.randint(self.pad + shrink * sampleSize, x.shape[-1] - sampleSize - self.pad - shrink * sampleSize)\n #print(startx)\n #x = x[startx - pad:startx + sampleSize + pad]\n #y = y[startx:startx + sampleSize]\n l = np.random.uniform(0.25, 0.5)\n sp = np.random.uniform(0, 1 - l)\n step = np.random.uniform(-0.5, 0.5)\n ux = int(sp * sample_rate)\n lx = int(l * sample_rate)\n # x[ux:ux + lx] = librosa.effects.pitch_shift(x[ux:ux + lx], sample_rate, n_steps=step)\n\n return {'x': x, 'y': y}\n\n\nclass ToTensor(object):\n def __call__(self, sample):\n x, y = sample['x'], sample['y']\n return {'x': torch.from_numpy(x.reshape(1, -1)).type(torch.float32),\n 'y': torch.from_numpy(y.reshape(-1)).type(torch.LongTensor)}\n\n\nclass Testset(data.Dataset):\n def __init__(self, listx, rootx,pad,dilations1,device):\n self.rootx = rootx\n self.listx = listx\n self.pad = int(pad)\n self.device=device\n self.dilations1=dilations1\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.listx)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n namex = self.listx[index]\n\n h5f = h5py.File(self.rootx + str(namex) + '.h5', 'r')\n x = h5f['x'][:]\n\n queue = []\n for i in self.dilations1:\n queue.append(torch.normal(torch.zeros(64,i),std=1).to(self.device))\n #queue.append(torch.zeros((64,i), dtype=torch.float32).to(self.device))\n\n x = mu_law_encode(x)\n\n x = torch.from_numpy(x.reshape(-1)).type(torch.LongTensor)\n #y = (torch.randint(0, 255, (self.field)).long())\n\n return namex,x,queue"
] | [
[
"numpy.random.uniform",
"numpy.random.seed",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lolrudy/GPV_pose | [
"f326a623b3e45e6edfc1963b068e8e7aaea2bfff"
] | [
"engine/train.py"
] | [
"import os\nimport random\n\nimport mmcv\nimport torch\nfrom absl import app\n\nfrom config.config import *\nfrom tools.training_utils import build_lr_rate, get_gt_v, build_optimizer\nfrom network.GPVPose import GPVPose\n\nFLAGS = flags.FLAGS\nfrom datasets.load_data import PoseDataset\nimport numpy as np\nimport time\n\n# from creating log\nimport tensorflow as tf\nfrom tools.eval_utils import setup_logger, compute_sRT_errors\ntorch.autograd.set_detect_anomaly(True)\ndevice = 'cuda'\n\ndef train(argv):\n if not os.path.exists(FLAGS.model_save):\n os.makedirs(FLAGS.model_save)\n tf.compat.v1.disable_eager_execution()\n tb_writter = tf.compat.v1.summary.FileWriter(FLAGS.model_save)\n logger = setup_logger('train_log', os.path.join(FLAGS.model_save, 'log.txt'))\n for key, value in vars(FLAGS).items():\n logger.info(key + ':' + str(value))\n Train_stage = 'PoseNet_only'\n network = GPVPose(Train_stage)\n network = network.to(device)\n # resume or not\n if FLAGS.resume:\n network.load_state_dict(torch.load(FLAGS.resume_model))\n s_epoch = FLAGS.resume_point\n else:\n s_epoch = 0\n\n # build dataset annd dataloader\n train_dataset = PoseDataset(source=FLAGS.dataset, mode='train',\n data_dir=FLAGS.dataset_dir, per_obj=FLAGS.per_obj)\n # start training datasets sampler\n st_time = time.time()\n train_steps = FLAGS.train_steps\n global_step = train_steps * s_epoch # record the number iteration\n train_size = train_steps * FLAGS.batch_size\n indices = []\n page_start = - train_size\n\n # build optimizer\n param_list = network.build_params(training_stage_freeze=[])\n optimizer = build_optimizer(param_list)\n optimizer.zero_grad() # first clear the grad\n scheduler = build_lr_rate(optimizer, total_iters=train_steps * FLAGS.total_epoch // FLAGS.accumulate)\n # training iteration, this code is develop based on object deform net\n for epoch in range(s_epoch, FLAGS.total_epoch):\n # train one epoch\n logger.info('Time {0}'.format(time.strftime(\"%Hh %Mm %Ss\", time.gmtime(time.time() - st_time)) + \\\n ', ' + 'Epoch %02d' % epoch + ', ' + 'Training started'))\n # create optimizer and adjust learning rate accordingly\n # sample train subset\n page_start += train_size\n len_last = len(indices) - page_start\n if len_last < train_size:\n indices = indices[page_start:]\n if FLAGS.dataset == 'CAMERA+Real':\n # CAMERA : Real = 3 : 1\n camera_len = train_dataset.subset_len[0]\n real_len = train_dataset.subset_len[1]\n real_indices = list(range(camera_len, camera_len + real_len))\n camera_indices = list(range(camera_len))\n n_repeat = (train_size - len_last) // (4 * real_len) + 1\n data_list = random.sample(camera_indices, 3 * n_repeat * real_len) + real_indices * n_repeat\n random.shuffle(data_list)\n indices += data_list\n else:\n data_list = list(range(train_dataset.length))\n for i in range((train_size - len_last) // train_dataset.length + 1):\n random.shuffle(data_list)\n indices += data_list\n page_start = 0\n train_idx = indices[page_start:(page_start + train_size)]\n train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_idx)\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=FLAGS.batch_size,\n sampler=train_sampler,\n num_workers=FLAGS.num_workers, pin_memory=True)\n network.train()\n\n #################################\n for i, data in enumerate(train_dataloader, 1):\n\n output_dict, loss_dict \\\n = network(rgb=data['roi_img'].to(device), depth=data['roi_depth'].to(device),\n depth_normalize=data['depth_normalize'].to(device),\n obj_id=data['cat_id'].to(device), 
camK=data['cam_K'].to(device), gt_mask=data['roi_mask'].to(device),\n gt_R=data['rotation'].to(device), gt_t=data['translation'].to(device),\n gt_s=data['fsnet_scale'].to(device), mean_shape=data['mean_shape'].to(device),\n gt_2D=data['roi_coord_2d'].to(device), sym=data['sym_info'].to(device),\n aug_bb=data['aug_bb'].to(device), aug_rt_t=data['aug_rt_t'].to(device), aug_rt_r=data['aug_rt_R'].to(device),\n def_mask=data['roi_mask_deform'].to(device),\n model_point=data['model_point'].to(device), nocs_scale=data['nocs_scale'].to(device), do_loss=True)\n fsnet_loss = loss_dict['fsnet_loss']\n recon_loss = loss_dict['recon_loss']\n geo_loss = loss_dict['geo_loss']\n prop_loss = loss_dict['prop_loss']\n\n total_loss = sum(fsnet_loss.values()) + sum(recon_loss.values()) \\\n + sum(geo_loss.values()) + sum(prop_loss.values()) \\\n\n # backward\n if global_step % FLAGS.accumulate == 0:\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(network.parameters(), 5)\n optimizer.step()\n scheduler.step()\n optimizer.zero_grad()\n else:\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(network.parameters(), 5)\n\n global_step += 1\n summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag='lr',\n simple_value=optimizer.param_groups[0][\"lr\"]),\n tf.compat.v1.Summary.Value(tag='train_loss', simple_value=total_loss),\n tf.compat.v1.Summary.Value(tag='rot_loss_1',\n simple_value=fsnet_loss['Rot1']),\n tf.compat.v1.Summary.Value(tag='rot_loss_2',\n simple_value=fsnet_loss['Rot2']),\n tf.compat.v1.Summary.Value(tag='T_loss',\n simple_value=fsnet_loss['Tran']),\n tf.compat.v1.Summary.Value(tag='Prop_sym_recon',\n simple_value=prop_loss['Prop_sym_recon']),\n tf.compat.v1.Summary.Value(tag='Prop_sym_rt',\n simple_value=prop_loss['Prop_sym_rt']),\n tf.compat.v1.Summary.Value(tag='Size_loss',\n simple_value=fsnet_loss['Size']),\n tf.compat.v1.Summary.Value(tag='Face_loss',\n simple_value=recon_loss['recon_per_p']),\n tf.compat.v1.Summary.Value(tag='Recon_loss_r',\n simple_value=recon_loss['recon_point_r']),\n tf.compat.v1.Summary.Value(tag='Recon_loss_t',\n simple_value=recon_loss['recon_point_t']),\n tf.compat.v1.Summary.Value(tag='Recon_loss_s',\n simple_value=recon_loss['recon_point_s']),\n tf.compat.v1.Summary.Value(tag='Recon_p_f',\n simple_value=recon_loss['recon_p_f']),\n tf.compat.v1.Summary.Value(tag='Recon_loss_se',\n simple_value=recon_loss['recon_point_self']),\n tf.compat.v1.Summary.Value(tag='Face_loss_vote',\n simple_value=recon_loss['recon_point_vote']),\n ])\n tb_writter.add_summary(summary, global_step)\n\n if i % FLAGS.log_every == 0:\n logger.info('Batch {0} Loss:{1:f}, rot_loss:{2:f}, size_loss:{3:f}, trans_loss:{4:f}'.format(\n i, total_loss.item(), (fsnet_loss['Rot1']+fsnet_loss['Rot2']).item(),\n fsnet_loss['Size'].item(), fsnet_loss['Tran'].item()))\n\n logger.info('>>>>>>>>----------Epoch {:02d} train finish---------<<<<<<<<'.format(epoch))\n\n # save model\n if (epoch + 1) % FLAGS.save_every == 0 or (epoch + 1) == FLAGS.total_epoch:\n torch.save(network.state_dict(), '{0}/model_{1:02d}.pth'.format(FLAGS.model_save, epoch))\n\n\nif __name__ == \"__main__\":\n app.run(train)\n"
] | [
[
"torch.autograd.set_detect_anomaly",
"torch.load",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.Summary.Value",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"tensorflow.compat.v1.disable_eager_execution"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mslapek/ray | [
"425edb5cd99417f00aed5bca5bca6a6d9e8292a2"
] | [
"python/ray/tests/test_basic.py"
] | [
"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom concurrent.futures import ThreadPoolExecutor\nimport glob\nimport io\nimport json\nimport logging\nimport os\nimport random\nimport re\nimport setproctitle\nimport shutil\nimport six\nimport socket\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nfrom ray import signature\nfrom ray.exceptions import RayTimeoutError\nimport ray.ray_constants as ray_constants\nimport ray.tests.cluster_utils\nimport ray.tests.utils\n\nfrom ray.tests.utils import RayTestTimeoutException\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_simple_serialization(ray_start_regular):\n primitive_objects = [\n # Various primitive types.\n 0,\n 0.0,\n 0.9,\n 1 << 62,\n 1 << 999,\n \"a\",\n string.printable,\n \"\\u262F\",\n u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\",\n None,\n True,\n False,\n [],\n (),\n {},\n type,\n int,\n set(),\n # Collections types.\n collections.Counter([np.random.randint(0, 10) for _ in range(100)]),\n collections.OrderedDict([(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: 0, [(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: [], [(\"hello\", 1), (\"world\", 2)]),\n collections.deque([1, 2, 3, \"a\", \"b\", \"c\", 3.5]),\n # Numpy dtypes.\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n ]\n\n if sys.version_info < (3, 0):\n primitive_objects.append(long(0)) # noqa: E501,F821\n\n composite_objects = (\n [[obj]\n for obj in primitive_objects] + [(obj, )\n for obj in primitive_objects] + [{\n (): obj\n } for obj in primitive_objects])\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in primitive_objects + composite_objects:\n new_obj_1 = ray.get(f.remote(obj))\n new_obj_2 = ray.get(ray.put(obj))\n assert obj == new_obj_1\n assert obj == new_obj_2\n # TODO(rkn): The numpy dtypes currently come back as regular integers\n # or floats.\n if type(obj).__module__ != \"numpy\":\n assert type(obj) == type(new_obj_1)\n assert type(obj) == type(new_obj_2)\n\n\ndef test_fair_queueing(shutdown_only):\n ray.init(\n num_cpus=1, _internal_config=json.dumps({\n \"fair_queueing_enabled\": 1\n }))\n\n @ray.remote\n def h():\n return 0\n\n @ray.remote\n def g():\n return ray.get(h.remote())\n\n @ray.remote\n def f():\n return ray.get(g.remote())\n\n # This will never finish without fair queueing of {f, g, h}:\n # https://github.com/ray-project/ray/issues/3644\n ready, _ = ray.wait(\n [f.remote() for _ in range(1000)], timeout=60.0, num_returns=1000)\n assert len(ready) == 1000, len(ready)\n\n\ndef complex_serialization(use_pickle):\n def assert_equal(obj1, obj2):\n module_numpy = (type(obj1).__module__ == np.__name__\n or type(obj2).__module__ == np.__name__)\n if module_numpy:\n empty_shape = ((hasattr(obj1, \"shape\") and obj1.shape == ())\n or (hasattr(obj2, \"shape\") and obj2.shape == ()))\n if empty_shape:\n # This is a special case because currently\n # np.testing.assert_equal fails because we do not properly\n # handle different numerical types.\n assert obj1 == obj2, (\"Objects {} and {} are \"\n \"different.\".format(obj1, obj2))\n else:\n np.testing.assert_equal(obj1, obj2)\n elif hasattr(obj1, 
\"__dict__\") and hasattr(obj2, \"__dict__\"):\n special_keys = [\"_pytype_\"]\n assert (set(list(obj1.__dict__.keys()) + special_keys) == set(\n list(obj2.__dict__.keys()) + special_keys)), (\n \"Objects {} and {} are different.\".format(obj1, obj2))\n for key in obj1.__dict__.keys():\n if key not in special_keys:\n assert_equal(obj1.__dict__[key], obj2.__dict__[key])\n elif type(obj1) is dict or type(obj2) is dict:\n assert_equal(obj1.keys(), obj2.keys())\n for key in obj1.keys():\n assert_equal(obj1[key], obj2[key])\n elif type(obj1) is list or type(obj2) is list:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are lists with \"\n \"different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif type(obj1) is tuple or type(obj2) is tuple:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are tuples \"\n \"with different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif (ray.serialization.is_named_tuple(type(obj1))\n or ray.serialization.is_named_tuple(type(obj2))):\n assert len(obj1) == len(obj2), (\n \"Objects {} and {} are named \"\n \"tuples with different lengths.\".format(obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n else:\n assert obj1 == obj2, \"Objects {} and {} are different.\".format(\n obj1, obj2)\n\n if sys.version_info >= (3, 0):\n long_extras = [0, np.array([[\"hi\", u\"hi\"], [1.3, 1]])]\n else:\n\n long_extras = [\n long(0), # noqa: E501,F821\n np.array([\n [\"hi\", u\"hi\"],\n [1.3, long(1)] # noqa: E501,F821\n ])\n ]\n\n PRIMITIVE_OBJECTS = [\n 0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], \"a\",\n string.printable, \"\\u262F\", u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\", None, True, False, [], (), {},\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n np.zeros([100, 100]),\n np.random.normal(size=[100, 100]),\n np.array([\"hi\", 3]),\n np.array([\"hi\", 3], dtype=object)\n ] + long_extras\n\n COMPLEX_OBJECTS = [\n [[[[[[[[[[[[]]]]]]]]]]]],\n {\n \"obj{}\".format(i): np.random.normal(size=[100, 100])\n for i in range(10)\n },\n # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {\n # (): {(): {}}}}}}}}}}}}},\n (\n (((((((((), ), ), ), ), ), ), ), ), ),\n {\n \"a\": {\n \"b\": {\n \"c\": {\n \"d\": {}\n }\n }\n }\n },\n ]\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n class Bar(object):\n def __init__(self):\n for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):\n setattr(self, \"field{}\".format(i), val)\n\n class Baz(object):\n def __init__(self):\n self.foo = Foo()\n self.bar = Bar()\n\n def method(self, arg):\n pass\n\n class Qux(object):\n def __init__(self):\n self.objs = [Foo(), Bar(), Baz()]\n\n class SubQux(Qux):\n def __init__(self):\n Qux.__init__(self)\n\n class CustomError(Exception):\n pass\n\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n NamedTupleExample = collections.namedtuple(\n \"Example\", \"field1, field2, field3, field4, field5\")\n\n CUSTOM_OBJECTS = [\n Exception(\"Test object.\"),\n CustomError(),\n Point(11, y=22),\n Foo(),\n Bar(),\n Baz(), # Qux(), SubQux(),\n NamedTupleExample(1, 1.0, \"hi\", np.zeros([3, 5]), [1, 2, 3]),\n ]\n\n # Test dataclasses in Python 3.7.\n if sys.version_info >= (3, 7):\n from dataclasses 
import make_dataclass\n\n DataClass0 = make_dataclass(\"DataClass0\", [(\"number\", int)])\n\n CUSTOM_OBJECTS.append(DataClass0(number=3))\n\n class CustomClass(object):\n def __init__(self, value):\n self.value = value\n\n DataClass1 = make_dataclass(\"DataClass1\", [(\"custom\", CustomClass)])\n\n class DataClass2(DataClass1):\n @classmethod\n def from_custom(cls, data):\n custom = CustomClass(data)\n return cls(custom)\n\n def __reduce__(self):\n return (self.from_custom, (self.custom.value, ))\n\n CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))\n\n BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS\n\n LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]\n TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]\n # The check that type(obj).__module__ != \"numpy\" should be unnecessary, but\n # otherwise this seems to fail on Mac OS X on Travis.\n DICT_OBJECTS = ([{\n obj: obj\n } for obj in PRIMITIVE_OBJECTS if (\n obj.__hash__ is not None and type(obj).__module__ != \"numpy\")] + [{\n 0: obj\n } for obj in BASE_OBJECTS] + [{\n Foo(123): Foo(456)\n }])\n\n RAY_TEST_OBJECTS = (\n BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in RAY_TEST_OBJECTS:\n assert_equal(obj, ray.get(f.remote(obj)))\n assert_equal(obj, ray.get(ray.put(obj)))\n\n # Test StringIO serialization\n s = io.StringIO(u\"Hello, world!\\n\")\n s.seek(0)\n line = s.readline()\n s.seek(0)\n assert ray.get(ray.put(s)).readline() == line\n\n\ndef test_complex_serialization(ray_start_regular):\n complex_serialization(use_pickle=False)\n\n\ndef test_complex_serialization_with_pickle(shutdown_only):\n ray.init(use_pickle=True)\n complex_serialization(use_pickle=True)\n\n\ndef test_nested_functions(ray_start_regular):\n # Make sure that remote functions can use other values that are defined\n # after the remote function but before the first function invocation.\n @ray.remote\n def f():\n return g(), ray.get(h.remote())\n\n def g():\n return 1\n\n @ray.remote\n def h():\n return 2\n\n assert ray.get(f.remote()) == (1, 2)\n\n # Test a remote function that recursively calls itself.\n\n @ray.remote\n def factorial(n):\n if n == 0:\n return 1\n return n * ray.get(factorial.remote(n - 1))\n\n assert ray.get(factorial.remote(0)) == 1\n assert ray.get(factorial.remote(1)) == 1\n assert ray.get(factorial.remote(2)) == 2\n assert ray.get(factorial.remote(3)) == 6\n assert ray.get(factorial.remote(4)) == 24\n assert ray.get(factorial.remote(5)) == 120\n\n # Test remote functions that recursively call each other.\n\n @ray.remote\n def factorial_even(n):\n assert n % 2 == 0\n if n == 0:\n return 1\n return n * ray.get(factorial_odd.remote(n - 1))\n\n @ray.remote\n def factorial_odd(n):\n assert n % 2 == 1\n return n * ray.get(factorial_even.remote(n - 1))\n\n assert ray.get(factorial_even.remote(4)) == 24\n assert ray.get(factorial_odd.remote(5)) == 120\n\n\ndef test_ray_recursive_objects(ray_start_regular):\n class ClassA(object):\n pass\n\n # Make a list that contains itself.\n lst = []\n lst.append(lst)\n # Make an object that contains itself as a field.\n a1 = ClassA()\n a1.field = a1\n # Make two objects that contain each other as fields.\n a2 = ClassA()\n a3 = ClassA()\n a2.field = a3\n a3.field = a2\n # Make a dictionary that contains itself.\n d1 = {}\n d1[\"key\"] = d1\n # Create a list of recursive objects.\n recursive_objects = [lst, a1, a2, a3, d1]\n\n if 
ray.worker.global_worker.use_pickle:\n # Serialize the recursive objects.\n for obj in recursive_objects:\n ray.put(obj)\n else:\n # Check that exceptions are thrown when we serialize the recursive\n # objects.\n for obj in recursive_objects:\n with pytest.raises(Exception):\n ray.put(obj)\n\n\ndef test_passing_arguments_by_value_out_of_the_box(ray_start_regular):\n @ray.remote\n def f(x):\n return x\n\n # Test passing lambdas.\n\n def temp():\n return 1\n\n assert ray.get(f.remote(temp))() == 1\n assert ray.get(f.remote(lambda x: x + 1))(3) == 4\n\n # Test sets.\n assert ray.get(f.remote(set())) == set()\n s = {1, (1, 2, \"hi\")}\n assert ray.get(f.remote(s)) == s\n\n # Test types.\n assert ray.get(f.remote(int)) == int\n assert ray.get(f.remote(float)) == float\n assert ray.get(f.remote(str)) == str\n\n class Foo(object):\n def __init__(self):\n pass\n\n # Make sure that we can put and get a custom type. Note that the result\n # won't be \"equal\" to Foo.\n ray.get(ray.put(Foo))\n\n\ndef test_putting_object_that_closes_over_object_id(ray_start_regular):\n # This test is here to prevent a regression of\n # https://github.com/ray-project/ray/issues/1317.\n\n class Foo(object):\n def __init__(self):\n self.val = ray.put(0)\n\n def method(self):\n f\n\n f = Foo()\n ray.put(f)\n\n\ndef test_put_get(shutdown_only):\n ray.init(num_cpus=0)\n\n for i in range(100):\n value_before = i * 10**6\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = i * 10**6 * 1.0\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = \"h\" * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = [1] * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n\ndef custom_serializers():\n class Foo(object):\n def __init__(self):\n self.x = 3\n\n def custom_serializer(obj):\n return 3, \"string1\", type(obj).__name__\n\n def custom_deserializer(serialized_obj):\n return serialized_obj, \"string2\"\n\n ray.register_custom_serializer(\n Foo, serializer=custom_serializer, deserializer=custom_deserializer)\n\n assert ray.get(ray.put(Foo())) == ((3, \"string1\", Foo.__name__), \"string2\")\n\n class Bar(object):\n def __init__(self):\n self.x = 3\n\n ray.register_custom_serializer(\n Bar, serializer=custom_serializer, deserializer=custom_deserializer)\n\n @ray.remote\n def f():\n return Bar()\n\n assert ray.get(f.remote()) == ((3, \"string1\", Bar.__name__), \"string2\")\n\n\ndef test_custom_serializers(ray_start_regular):\n custom_serializers()\n\n\ndef test_custom_serializers_with_pickle(shutdown_only):\n ray.init(use_pickle=True)\n custom_serializers()\n\n class Foo(object):\n def __init__(self):\n self.x = 4\n\n # Test the pickle serialization backend without serializer.\n # NOTE: 'use_pickle' here is different from 'use_pickle' in\n # ray.init\n ray.register_custom_serializer(Foo, use_pickle=True)\n\n @ray.remote\n def f():\n return Foo()\n\n assert type(ray.get(f.remote())) == Foo\n\n\ndef test_serialization_final_fallback(ray_start_regular):\n pytest.importorskip(\"catboost\")\n # This test will only run when \"catboost\" is installed.\n from catboost import CatBoostClassifier\n\n model = CatBoostClassifier(\n iterations=2,\n depth=2,\n learning_rate=1,\n loss_function=\"Logloss\",\n 
logging_level=\"Verbose\")\n\n reconstructed_model = ray.get(ray.put(model))\n assert set(model.get_params().items()) == set(\n reconstructed_model.get_params().items())\n\n\ndef test_register_class(ray_start_2_cpus):\n # Check that putting an object of a class that has not been registered\n # throws an exception.\n class TempClass(object):\n pass\n\n ray.get(ray.put(TempClass()))\n\n # Test passing custom classes into remote functions from the driver.\n @ray.remote\n def f(x):\n return x\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n foo = ray.get(f.remote(Foo(7)))\n assert foo == Foo(7)\n\n regex = re.compile(r\"\\d+\\.\\d*\")\n new_regex = ray.get(f.remote(regex))\n # This seems to fail on the system Python 3 that comes with\n # Ubuntu, so it is commented out for now:\n # assert regex == new_regex\n # Instead, we do this:\n assert regex.pattern == new_regex.pattern\n\n class TempClass1(object):\n def __init__(self):\n self.value = 1\n\n # Test returning custom classes created on workers.\n @ray.remote\n def g():\n class TempClass2(object):\n def __init__(self):\n self.value = 2\n\n return TempClass1(), TempClass2()\n\n object_1, object_2 = ray.get(g.remote())\n assert object_1.value == 1\n assert object_2.value == 2\n\n # Test exporting custom class definitions from one worker to another\n # when the worker is blocked in a get.\n class NewTempClass(object):\n def __init__(self, value):\n self.value = value\n\n @ray.remote\n def h1(x):\n return NewTempClass(x)\n\n @ray.remote\n def h2(x):\n return ray.get(h1.remote(x))\n\n assert ray.get(h2.remote(10)).value == 10\n\n # Test registering multiple classes with the same name.\n @ray.remote(num_return_vals=3)\n def j():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = []\n for _ in range(5):\n results += j.remote()\n for i in range(len(results) // 3):\n c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])\n\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n @ray.remote\n def k():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = ray.get([k.remote() for _ in range(5)])\n for c0, c1, c2 in results:\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n\ndef test_keyword_args(ray_start_regular):\n @ray.remote\n def keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n x = keyword_fct1.remote(1)\n assert ray.get(x) == \"1 hello\"\n x = 
keyword_fct1.remote(1, \"hi\")\n assert ray.get(x) == \"1 hi\"\n x = keyword_fct1.remote(1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n x = keyword_fct1.remote(a=1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n\n x = keyword_fct2.remote(a=\"w\", b=\"hi\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(b=\"hi\", a=\"w\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(a=\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(b=\"hi\")\n assert ray.get(x) == \"hello hi\"\n x = keyword_fct2.remote(\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(\"w\", \"hi\")\n assert ray.get(x) == \"w hi\"\n\n x = keyword_fct3.remote(0, 1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(a=0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, d=\"hi\", c=\"w\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, c=\"w\")\n assert ray.get(x) == \"0 1 w world\"\n x = keyword_fct3.remote(0, 1, d=\"hi\")\n assert ray.get(x) == \"0 1 hello hi\"\n x = keyword_fct3.remote(0, 1)\n assert ray.get(x) == \"0 1 hello world\"\n x = keyword_fct3.remote(a=0, b=1)\n assert ray.get(x) == \"0 1 hello world\"\n\n # Check that we cannot pass invalid keyword arguments to functions.\n @ray.remote\n def f1():\n return\n\n @ray.remote\n def f2(x, y=0, z=0):\n return\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f1.remote(3)\n\n with pytest.raises(Exception):\n f1.remote(x=3)\n\n with pytest.raises(Exception):\n f2.remote(0, w=0)\n\n with pytest.raises(Exception):\n f2.remote(3, x=3)\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f2.remote(1, 2, 3, 4)\n\n @ray.remote\n def f3(x):\n return x\n\n assert ray.get(f3.remote(4)) == 4\n\n\[email protected](\n sys.version_info < (3, 0), reason=\"This test requires Python 3.\")\[email protected](\n \"ray_start_regular\", [{\n \"local_mode\": True\n }, {\n \"local_mode\": False\n }],\n indirect=True)\ndef test_args_starkwargs(ray_start_regular):\n def starkwargs(a, b, **kwargs):\n return a, b, kwargs\n\n class TestActor(object):\n def starkwargs(self, a, b, **kwargs):\n return a, b, kwargs\n\n def test_function(fn, remote_fn):\n assert fn(1, 2, x=3) == ray.get(remote_fn.remote(1, 2, x=3))\n with pytest.raises(TypeError):\n remote_fn.remote(3)\n\n remote_test_function = ray.remote(test_function)\n\n remote_starkwargs = ray.remote(starkwargs)\n test_function(starkwargs, remote_starkwargs)\n ray.get(remote_test_function.remote(starkwargs, remote_starkwargs))\n\n remote_actor_class = ray.remote(TestActor)\n remote_actor = remote_actor_class.remote()\n actor_method = remote_actor.starkwargs\n local_actor = TestActor()\n local_method = local_actor.starkwargs\n test_function(local_method, actor_method)\n ray.get(remote_test_function.remote(local_method, actor_method))\n\n\[email protected](\n sys.version_info < (3, 0), reason=\"This test requires Python 3.\")\[email protected](\n \"ray_start_regular\", [{\n \"local_mode\": True\n }, {\n \"local_mode\": False\n }],\n indirect=True)\ndef test_args_named_and_star(ray_start_regular):\n def hello(a, x=\"hello\", **kwargs):\n return a, x, kwargs\n\n class TestActor(object):\n def hello(self, a, x=\"hello\", **kwargs):\n return a, x, kwargs\n\n def test_function(fn, remote_fn):\n assert fn(1, x=2, 
y=3) == ray.get(remote_fn.remote(1, x=2, y=3))\n assert fn(1, 2, y=3) == ray.get(remote_fn.remote(1, 2, y=3))\n assert fn(1, y=3) == ray.get(remote_fn.remote(1, y=3))\n\n assert fn(1, ) == ray.get(remote_fn.remote(1, ))\n assert fn(1) == ray.get(remote_fn.remote(1))\n\n with pytest.raises(TypeError):\n remote_fn.remote(1, 2, x=3)\n\n remote_test_function = ray.remote(test_function)\n\n remote_hello = ray.remote(hello)\n test_function(hello, remote_hello)\n ray.get(remote_test_function.remote(hello, remote_hello))\n\n remote_actor_class = ray.remote(TestActor)\n remote_actor = remote_actor_class.remote()\n actor_method = remote_actor.hello\n local_actor = TestActor()\n local_method = local_actor.hello\n test_function(local_method, actor_method)\n ray.get(remote_test_function.remote(local_method, actor_method))\n\n\[email protected](\n sys.version_info < (3, 0), reason=\"This test requires Python 3.\")\[email protected](\n \"ray_start_regular\", [{\n \"local_mode\": True\n }, {\n \"local_mode\": False\n }],\n indirect=True)\ndef test_args_stars_after(ray_start_regular):\n def star_args_after(a=\"hello\", b=\"heo\", *args, **kwargs):\n return a, b, args, kwargs\n\n class TestActor(object):\n def star_args_after(self, a=\"hello\", b=\"heo\", *args, **kwargs):\n return a, b, args, kwargs\n\n def test_function(fn, remote_fn):\n assert fn(\"hi\", \"hello\", 2) == ray.get(\n remote_fn.remote(\"hi\", \"hello\", 2))\n assert fn(\n \"hi\", \"hello\", 2, hi=\"hi\") == ray.get(\n remote_fn.remote(\"hi\", \"hello\", 2, hi=\"hi\"))\n assert fn(hi=\"hi\") == ray.get(remote_fn.remote(hi=\"hi\"))\n\n remote_test_function = ray.remote(test_function)\n\n remote_star_args_after = ray.remote(star_args_after)\n test_function(star_args_after, remote_star_args_after)\n ray.get(\n remote_test_function.remote(star_args_after, remote_star_args_after))\n\n remote_actor_class = ray.remote(TestActor)\n remote_actor = remote_actor_class.remote()\n actor_method = remote_actor.star_args_after\n local_actor = TestActor()\n local_method = local_actor.star_args_after\n test_function(local_method, actor_method)\n ray.get(remote_test_function.remote(local_method, actor_method))\n\n\ndef test_variable_number_of_args(shutdown_only):\n @ray.remote\n def varargs_fct1(*a):\n return \" \".join(map(str, a))\n\n @ray.remote\n def varargs_fct2(a, *b):\n return \" \".join(map(str, b))\n\n ray.init(num_cpus=1)\n\n x = varargs_fct1.remote(0, 1, 2)\n assert ray.get(x) == \"0 1 2\"\n x = varargs_fct2.remote(0, 1, 2)\n assert ray.get(x) == \"1 2\"\n\n @ray.remote\n def f1(*args):\n return args\n\n @ray.remote\n def f2(x, y, *args):\n return x, y, args\n\n assert ray.get(f1.remote()) == ()\n assert ray.get(f1.remote(1)) == (1, )\n assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)\n with pytest.raises(Exception):\n f2.remote()\n with pytest.raises(Exception):\n f2.remote(1)\n assert ray.get(f2.remote(1, 2)) == (1, 2, ())\n assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))\n assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))\n\n def testNoArgs(self):\n @ray.remote\n def no_op():\n pass\n\n self.ray_start()\n\n ray.get(no_op.remote())\n\n\ndef test_defining_remote_functions(shutdown_only):\n ray.init(num_cpus=3)\n\n # Test that we can define a remote function in the shell.\n @ray.remote\n def f(x):\n return x + 1\n\n assert ray.get(f.remote(0)) == 1\n\n # Test that we can redefine the remote function.\n @ray.remote\n def f(x):\n return x + 10\n\n while True:\n val = ray.get(f.remote(0))\n assert val in [1, 10]\n if val == 10:\n break\n 
else:\n logger.info(\"Still using old definition of f, trying again.\")\n\n # Test that we can close over plain old data.\n data = [\n np.zeros([3, 5]), (1, 2, \"a\"), [0.0, 1.0, 1 << 62], 1 << 60, {\n \"a\": np.zeros(3)\n }\n ]\n\n @ray.remote\n def g():\n return data\n\n ray.get(g.remote())\n\n # Test that we can close over modules.\n @ray.remote\n def h():\n return np.zeros([3, 5])\n\n assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))\n\n @ray.remote\n def j():\n return time.time()\n\n ray.get(j.remote())\n\n # Test that we can define remote functions that call other remote\n # functions.\n @ray.remote\n def k(x):\n return x + 1\n\n @ray.remote\n def k2(x):\n return ray.get(k.remote(x))\n\n @ray.remote\n def m(x):\n return ray.get(k2.remote(x))\n\n assert ray.get(k.remote(1)) == 2\n assert ray.get(k2.remote(1)) == 2\n assert ray.get(m.remote(1)) == 2\n\n\ndef test_submit_api(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"Custom\": 1})\n\n @ray.remote\n def f(n):\n return list(range(n))\n\n @ray.remote\n def g():\n return ray.get_gpu_ids()\n\n assert f._remote([0], num_return_vals=0) is None\n id1 = f._remote(args=[1], num_return_vals=1)\n assert ray.get(id1) == [0]\n id1, id2 = f._remote(args=[2], num_return_vals=2)\n assert ray.get([id1, id2]) == [0, 1]\n id1, id2, id3 = f._remote(args=[3], num_return_vals=3)\n assert ray.get([id1, id2, id3]) == [0, 1, 2]\n assert ray.get(\n g._remote(args=[], num_cpus=1, num_gpus=1,\n resources={\"Custom\": 1})) == [0]\n infeasible_id = g._remote(args=[], resources={\"NonexistentCustom\": 1})\n assert ray.get(g._remote()) == []\n ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)\n assert len(ready_ids) == 0\n assert len(remaining_ids) == 1\n\n @ray.remote\n class Actor(object):\n def __init__(self, x, y=0):\n self.x = x\n self.y = y\n\n def method(self, a, b=0):\n return self.x, self.y, a, b\n\n def gpu_ids(self):\n return ray.get_gpu_ids()\n\n @ray.remote\n class Actor2(object):\n def __init__(self):\n pass\n\n def method(self):\n pass\n\n a = Actor._remote(\n args=[0], kwargs={\"y\": 1}, num_gpus=1, resources={\"Custom\": 1})\n\n a2 = Actor2._remote()\n ray.get(a2.method._remote())\n\n id1, id2, id3, id4 = a.method._remote(\n args=[\"test\"], kwargs={\"b\": 2}, num_return_vals=4)\n assert ray.get([id1, id2, id3, id4]) == [0, 1, \"test\", 2]\n\n\ndef test_many_fractional_resources(shutdown_only):\n ray.init(num_cpus=2, num_gpus=2, resources={\"Custom\": 2})\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote\n def f(block, accepted_resources):\n true_resources = {\n resource: value[0][1]\n for resource, value in ray.get_resource_ids().items()\n }\n if block:\n ray.get(g.remote())\n return true_resources == accepted_resources\n\n # Check that the resource are assigned correctly.\n result_ids = []\n for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):\n resource_set = {\"CPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_cpus=rand1))\n\n resource_set = {\"CPU\": 1, \"GPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_gpus=rand1))\n\n resource_set = {\"CPU\": 1, \"Custom\": int(rand1 * 10000) / 10000}\n result_ids.append(\n f._remote([False, resource_set], resources={\"Custom\": rand1}))\n\n resource_set = {\n \"CPU\": int(rand1 * 10000) / 10000,\n \"GPU\": int(rand2 * 10000) / 10000,\n \"Custom\": int(rand3 * 10000) / 10000\n }\n result_ids.append(\n f._remote(\n [False, resource_set],\n num_cpus=rand1,\n 
num_gpus=rand2,\n resources={\"Custom\": rand3}))\n result_ids.append(\n f._remote(\n [True, resource_set],\n num_cpus=rand1,\n num_gpus=rand2,\n resources={\"Custom\": rand3}))\n assert all(ray.get(result_ids))\n\n # Check that the available resources at the end are the same as the\n # beginning.\n stop_time = time.time() + 10\n correct_available_resources = False\n while time.time() < stop_time:\n if (ray.available_resources()[\"CPU\"] == 2.0\n and ray.available_resources()[\"GPU\"] == 2.0\n and ray.available_resources()[\"Custom\"] == 2.0):\n correct_available_resources = True\n break\n if not correct_available_resources:\n assert False, \"Did not get correct available resources.\"\n\n\ndef test_get_multiple(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n assert ray.get(object_ids) == list(range(10))\n\n # Get a random choice of object IDs with duplicates.\n indices = list(np.random.choice(range(10), 5))\n indices += indices\n results = ray.get([object_ids[i] for i in indices])\n assert results == indices\n\n\ndef test_get_multiple_experimental(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n\n object_ids_tuple = tuple(object_ids)\n assert ray.experimental.get(object_ids_tuple) == list(range(10))\n\n object_ids_nparray = np.array(object_ids)\n assert ray.experimental.get(object_ids_nparray) == list(range(10))\n\n\ndef test_get_dict(ray_start_regular):\n d = {str(i): ray.put(i) for i in range(5)}\n for i in range(5, 10):\n d[str(i)] = i\n result = ray.experimental.get(d)\n expected = {str(i): i for i in range(10)}\n assert result == expected\n\n\ndef test_get_with_timeout(ray_start_regular):\n @ray.remote\n def f(a):\n time.sleep(a)\n return a\n\n assert ray.get(f.remote(3), timeout=10) == 3\n\n obj_id = f.remote(3)\n with pytest.raises(RayTimeoutError):\n ray.get(obj_id, timeout=2)\n assert ray.get(obj_id, timeout=2) == 3\n\n\[email protected](\n \"ray_start_cluster\", [{\n \"num_cpus\": 1,\n \"num_nodes\": 1,\n }, {\n \"num_cpus\": 1,\n \"num_nodes\": 2,\n }],\n indirect=True)\ndef test_direct_call_simple(ray_start_cluster):\n @ray.remote\n def f(x):\n return x + 1\n\n f_direct = f.options(is_direct_call=True)\n assert ray.get(f_direct.remote(2)) == 3\n for _ in range(10):\n assert ray.get([f_direct.remote(i) for i in range(100)]) == list(\n range(1, 101))\n\n\ndef test_direct_call_refcount(ray_start_regular):\n @ray.remote\n def f(x):\n return x + 1\n\n @ray.remote\n def sleep():\n time.sleep(.1)\n return 1\n\n # Multiple gets should not hang with ref counting enabled.\n f_direct = f.options(is_direct_call=True)\n x = f_direct.remote(2)\n ray.get(x)\n ray.get(x)\n\n # Temporary objects should be retained for chained callers.\n y = f_direct.remote(sleep.options(is_direct_call=True).remote())\n assert ray.get(y) == 2\n\n\ndef test_direct_call_matrix(shutdown_only):\n ray.init(object_store_memory=1000 * 1024 * 1024)\n\n @ray.remote\n class Actor(object):\n def small_value(self):\n return 0\n\n def large_value(self):\n return np.zeros(10 * 1024 * 1024)\n\n def echo(self, x):\n if isinstance(x, list):\n x = ray.get(x[0])\n return x\n\n @ray.remote\n def small_value():\n return 0\n\n @ray.remote\n def large_value():\n return np.zeros(10 * 1024 * 1024)\n\n @ray.remote\n def echo(x):\n if isinstance(x, list):\n x = ray.get(x[0])\n return x\n\n def check(source_actor, dest_actor, is_large, out_of_band):\n print(\"CHECKING\", \"actor\" if source_actor else \"task\", \"to\", \"actor\"\n if dest_actor else \"task\", \"large_object\"\n if is_large else 
\"small_object\", \"out_of_band\"\n if out_of_band else \"in_band\")\n if source_actor:\n a = Actor.options(is_direct_call=True).remote()\n if is_large:\n x_id = a.large_value.remote()\n else:\n x_id = a.small_value.remote()\n else:\n if is_large:\n x_id = large_value.options(is_direct_call=True).remote()\n else:\n x_id = small_value.options(is_direct_call=True).remote()\n if out_of_band:\n x_id = [x_id]\n if dest_actor:\n b = Actor.options(is_direct_call=True).remote()\n x = ray.get(b.echo.remote(x_id))\n else:\n x = ray.get(echo.options(is_direct_call=True).remote(x_id))\n if is_large:\n assert isinstance(x, np.ndarray)\n else:\n assert isinstance(x, int)\n\n for is_large in [False, True]:\n for source_actor in [False, True]:\n for dest_actor in [False, True]:\n for out_of_band in [False, True]:\n check(source_actor, dest_actor, is_large, out_of_band)\n\n\[email protected](\n \"ray_start_cluster\", [{\n \"num_cpus\": 1,\n \"num_nodes\": 1,\n }, {\n \"num_cpus\": 1,\n \"num_nodes\": 2,\n }],\n indirect=True)\ndef test_direct_call_chain(ray_start_cluster):\n @ray.remote\n def g(x):\n return x + 1\n\n g_direct = g.options(is_direct_call=True)\n x = 0\n for _ in range(100):\n x = g_direct.remote(x)\n assert ray.get(x) == 100\n\n\ndef test_direct_actor_enabled(ray_start_regular):\n @ray.remote\n class Actor(object):\n def __init__(self):\n pass\n\n def f(self, x):\n return x * 2\n\n a = Actor._remote(is_direct_call=True)\n obj_id = a.f.remote(1)\n # it is not stored in plasma\n assert not ray.worker.global_worker.core_worker.object_exists(obj_id)\n assert ray.get(obj_id) == 2\n\n\ndef test_direct_actor_order(shutdown_only):\n ray.init(num_cpus=4)\n\n @ray.remote\n def small_value():\n time.sleep(0.01 * np.random.randint(0, 10))\n return 0\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n self.count = 0\n\n def inc(self, count, dependency):\n assert count == self.count\n self.count += 1\n return count\n\n a = Actor._remote(is_direct_call=True)\n assert ray.get([\n a.inc.remote(i, small_value.options(is_direct_call=True).remote())\n for i in range(100)\n ]) == list(range(100))\n\n\ndef test_direct_actor_large_objects(ray_start_regular):\n @ray.remote\n class Actor(object):\n def __init__(self):\n pass\n\n def f(self):\n time.sleep(1)\n return np.zeros(10000000)\n\n a = Actor._remote(is_direct_call=True)\n obj_id = a.f.remote()\n assert not ray.worker.global_worker.core_worker.object_exists(obj_id)\n done, _ = ray.wait([obj_id])\n assert len(done) == 1\n assert ray.worker.global_worker.core_worker.object_exists(obj_id)\n assert isinstance(ray.get(obj_id), np.ndarray)\n\n\ndef test_direct_actor_pass_by_ref(ray_start_regular):\n @ray.remote\n class Actor(object):\n def __init__(self):\n pass\n\n def f(self, x):\n return x * 2\n\n @ray.remote\n def f(x):\n return x\n\n @ray.remote\n def error():\n sys.exit(0)\n\n a = Actor._remote(is_direct_call=True)\n assert ray.get(a.f.remote(f.remote(1))) == 2\n\n fut = [a.f.remote(f.remote(i)) for i in range(100)]\n assert ray.get(fut) == [i * 2 for i in range(100)]\n\n # propagates errors for pass by ref\n with pytest.raises(Exception):\n ray.get(a.f.remote(error.remote()))\n\n\ndef test_direct_actor_pass_by_ref_order_optimization(shutdown_only):\n ray.init(num_cpus=4)\n\n @ray.remote\n class Actor(object):\n def __init__(self):\n pass\n\n def f(self, x):\n pass\n\n a = Actor._remote(is_direct_call=True)\n\n @ray.remote\n def fast_value():\n print(\"fast value\")\n pass\n\n @ray.remote\n def slow_value():\n print(\"start sleep\")\n 
time.sleep(30)\n\n @ray.remote\n def runner(f):\n print(\"runner\", a, f)\n return ray.get(a.f.remote(f.remote()))\n\n runner.remote(slow_value)\n time.sleep(1)\n x2 = runner.remote(fast_value)\n start = time.time()\n ray.get(x2)\n delta = time.time() - start\n assert delta < 10, \"did not skip slow value\"\n\n\ndef test_direct_actor_recursive(ray_start_regular):\n @ray.remote\n class Actor(object):\n def __init__(self, delegate=None):\n self.delegate = delegate\n\n def f(self, x):\n if self.delegate:\n return ray.get(self.delegate.f.remote(x))\n return x * 2\n\n a = Actor._remote(is_direct_call=True)\n b = Actor._remote(args=[a], is_direct_call=True)\n c = Actor._remote(args=[b], is_direct_call=True)\n\n result = ray.get([c.f.remote(i) for i in range(100)])\n assert result == [x * 2 for x in range(100)]\n\n result, _ = ray.wait([c.f.remote(i) for i in range(100)], num_returns=100)\n result = ray.get(result)\n assert result == [x * 2 for x in range(100)]\n\n\ndef test_direct_actor_concurrent(ray_start_regular):\n @ray.remote\n class Batcher(object):\n def __init__(self):\n self.batch = []\n self.event = threading.Event()\n\n def add(self, x):\n self.batch.append(x)\n if len(self.batch) >= 3:\n self.event.set()\n else:\n self.event.wait()\n return sorted(self.batch)\n\n a = Batcher.options(is_direct_call=True, max_concurrency=3).remote()\n x1 = a.add.remote(1)\n x2 = a.add.remote(2)\n x3 = a.add.remote(3)\n r1 = ray.get(x1)\n r2 = ray.get(x2)\n r3 = ray.get(x3)\n assert r1 == [1, 2, 3]\n assert r1 == r2 == r3\n\n\ndef test_wait(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n ready_ids, remaining_ids = ray.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)\n assert set(ready_ids) == set(objectids)\n assert remaining_ids == []\n\n objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)\n assert time.time() - start_time < 2\n assert len(ready_ids) == 3\n assert len(remaining_ids) == 1\n ray.wait(objectids)\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)\n assert time.time() - start_time < 5\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n # Verify that calling wait with duplicate object IDs throws an\n # exception.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.wait([x, x])\n\n # Make sure it is possible to call wait with an empty list.\n ready_ids, remaining_ids = ray.wait([])\n assert ready_ids == []\n assert remaining_ids == []\n\n # Test semantics of num_returns with no timeout.\n oids = [ray.put(i) for i in range(10)]\n (found, rest) = ray.wait(oids, num_returns=2)\n assert len(found) == 2\n assert len(rest) == 8\n\n # Verify that incorrect usage raises a TypeError.\n x = ray.put(1)\n with pytest.raises(TypeError):\n ray.wait(x)\n with pytest.raises(TypeError):\n ray.wait(1)\n with pytest.raises(TypeError):\n ray.wait([1])\n\n\ndef test_wait_iterables(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 
3\n\n objectids = np.array(\n [f.remote(1.0),\n f.remote(0.5),\n f.remote(0.5),\n f.remote(0.5)])\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n\ndef test_multiple_waits_and_gets(shutdown_only):\n # It is important to use three workers here, so that the three tasks\n # launched in this experiment can run at the same time.\n ray.init(num_cpus=3)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n @ray.remote\n def g(l):\n # The argument l should be a list containing one object ID.\n ray.wait([l[0]])\n\n @ray.remote\n def h(l):\n # The argument l should be a list containing one object ID.\n ray.get(l[0])\n\n # Make sure that multiple wait requests involving the same object ID\n # all return.\n x = f.remote(1)\n ray.get([g.remote([x]), g.remote([x])])\n\n # Make sure that multiple get requests involving the same object ID all\n # return.\n x = f.remote(1)\n ray.get([h.remote([x]), h.remote([x])])\n\n\ndef test_caching_functions_to_run(shutdown_only):\n # Test that we export functions to run on all workers before the driver\n # is connected.\n def f(worker_info):\n sys.path.append(1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def f(worker_info):\n sys.path.append(2)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def g(worker_info):\n sys.path.append(3)\n\n ray.worker.global_worker.run_function_on_all_workers(g)\n\n def f(worker_info):\n sys.path.append(4)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n ray.init(num_cpus=1)\n\n @ray.remote\n def get_state():\n time.sleep(1)\n return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]\n\n res1 = get_state.remote()\n res2 = get_state.remote()\n assert ray.get(res1) == (1, 2, 3, 4)\n assert ray.get(res2) == (1, 2, 3, 4)\n\n # Clean up the path on the workers.\n def f(worker_info):\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n\ndef test_running_function_on_all_workers(ray_start_regular):\n def f(worker_info):\n sys.path.append(\"fake_directory\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n @ray.remote\n def get_path1():\n return sys.path\n\n assert \"fake_directory\" == ray.get(get_path1.remote())[-1]\n\n def f(worker_info):\n sys.path.pop(-1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n # Create a second remote function to guarantee that when we call\n # get_path2.remote(), the second function to run will have been run on\n # the worker.\n @ray.remote\n def get_path2():\n return sys.path\n\n assert \"fake_directory\" not in ray.get(get_path2.remote())\n\n\ndef test_profiling_api(ray_start_2_cpus):\n @ray.remote\n def f():\n with ray.profile(\"custom_event\", extra_data={\"name\": \"custom name\"}):\n pass\n\n ray.put(1)\n object_id = f.remote()\n ray.wait([object_id])\n ray.get(object_id)\n\n # Wait until all of the profiling information appears in the profile\n # table.\n timeout_seconds = 20\n start_time = time.time()\n while True:\n profile_data = ray.timeline()\n event_types = {event[\"cat\"] for event in profile_data}\n expected_types = [\n \"task\",\n \"task:deserialize_arguments\",\n \"task:execute\",\n \"task:store_outputs\",\n \"wait_for_function\",\n \"ray.get\",\n \"ray.put\",\n \"ray.wait\",\n \"submit_task\",\n \"fetch_and_run_function\",\n \"register_remote_function\",\n \"custom_event\", # This is the custom one from ray.profile.\n ]\n\n if all(expected_type 
in event_types\n for expected_type in expected_types):\n break\n\n if time.time() - start_time > timeout_seconds:\n raise RayTestTimeoutException(\n \"Timed out while waiting for information in \"\n \"profile table. Missing events: {}.\".format(\n set(expected_types) - set(event_types)))\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n\ndef test_wait_cluster(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n ray.init(address=cluster.address)\n\n @ray.remote(resources={\"RemoteResource\": 1})\n def f():\n return\n\n # Make sure we have enough workers on the remote nodes to execute some\n # tasks.\n tasks = [f.remote() for _ in range(10)]\n start = time.time()\n ray.get(tasks)\n end = time.time()\n\n # Submit some more tasks that can only be executed on the remote nodes.\n tasks = [f.remote() for _ in range(10)]\n # Sleep for a bit to let the tasks finish.\n time.sleep((end - start) * 2)\n _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)\n # All remote tasks should have finished.\n assert len(unready) == 0\n\n\ndef test_object_transfer_dump(ray_start_cluster):\n cluster = ray_start_cluster\n\n num_nodes = 3\n for i in range(num_nodes):\n cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n return\n\n # These objects will live on different nodes.\n object_ids = [\n f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)\n ]\n\n # Broadcast each object from each machine to each other machine.\n for object_id in object_ids:\n ray.get([\n f._remote(args=[object_id], resources={str(i): 1})\n for i in range(num_nodes)\n ])\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n transfer_dump = ray.object_transfer_timeline()\n # Make sure the transfer dump can be serialized with JSON.\n json.loads(json.dumps(transfer_dump))\n assert len(transfer_dump) >= num_nodes**2\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_receive\"\n }) == num_nodes\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_send\"\n }) == num_nodes\n\n\ndef test_identical_function_names(ray_start_regular):\n # Define a bunch of remote functions and make sure that we don't\n # accidentally call an older version.\n\n num_calls = 200\n\n @ray.remote\n def f():\n return 1\n\n results1 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 2\n\n results2 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 3\n\n results3 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 4\n\n results4 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 5\n\n results5 = [f.remote() for _ in range(num_calls)]\n\n assert ray.get(results1) == num_calls * [1]\n assert ray.get(results2) == num_calls * [2]\n assert ray.get(results3) == num_calls * [3]\n assert ray.get(results4) == num_calls * [4]\n assert ray.get(results5) == num_calls * [5]\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote # noqa: F811\n def g():\n return 2\n\n @ray.remote # noqa: F811\n def g():\n return 3\n\n @ray.remote # noqa: F811\n def g():\n return 4\n\n @ray.remote # noqa: F811\n def g():\n return 5\n\n result_values = ray.get([g.remote() for _ in range(num_calls)])\n assert result_values == 
num_calls * [5]\n\n\ndef test_illegal_api_calls(ray_start_regular):\n\n # Verify that we cannot call put on an ObjectID.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.put(x)\n # Verify that we cannot call get on a regular value.\n with pytest.raises(Exception):\n ray.get(3)\n\n\n# TODO(hchen): This test currently doesn't work in Python 2. This is likely\n# because plasma client isn't thread-safe. This needs to be fixed from the\n# Arrow side. See #4107 for relevant discussions.\[email protected](six.PY2, reason=\"Doesn't work in Python 2.\")\ndef test_multithreading(ray_start_2_cpus):\n # This test requires at least 2 CPUs to finish since the worker does not\n # release resources when joining the threads.\n\n def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):\n \"\"\"A helper function that runs test cases in multiple threads.\"\"\"\n\n def wrapper():\n for _ in range(num_repeats):\n test_case()\n time.sleep(random.randint(0, 10) / 1000.0)\n return \"ok\"\n\n executor = ThreadPoolExecutor(max_workers=num_threads)\n futures = [executor.submit(wrapper) for _ in range(num_threads)]\n for future in futures:\n assert future.result() == \"ok\"\n\n @ray.remote\n def echo(value, delay_ms=0):\n if delay_ms > 0:\n time.sleep(delay_ms / 1000.0)\n return value\n\n def test_api_in_multi_threads():\n \"\"\"Test using Ray api in multiple threads.\"\"\"\n\n @ray.remote\n class Echo(object):\n def echo(self, value):\n return value\n\n # Test calling remote functions in multiple threads.\n def test_remote_call():\n value = random.randint(0, 1000000)\n result = ray.get(echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_remote_call)\n\n # Test multiple threads calling one actor.\n actor = Echo.remote()\n\n def test_call_actor():\n value = random.randint(0, 1000000)\n result = ray.get(actor.echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_call_actor)\n\n # Test put and get.\n def test_put_and_get():\n value = random.randint(0, 1000000)\n result = ray.get(ray.put(value))\n assert value == result\n\n run_test_in_multi_threads(test_put_and_get)\n\n # Test multiple threads waiting for objects.\n num_wait_objects = 10\n objects = [\n echo.remote(i, delay_ms=10) for i in range(num_wait_objects)\n ]\n\n def test_wait():\n ready, _ = ray.wait(\n objects,\n num_returns=len(objects),\n timeout=1000.0,\n )\n assert len(ready) == num_wait_objects\n assert ray.get(ready) == list(range(num_wait_objects))\n\n run_test_in_multi_threads(test_wait, num_repeats=1)\n\n # Run tests in a driver.\n test_api_in_multi_threads()\n\n # Run tests in a worker.\n @ray.remote\n def run_tests_in_worker():\n test_api_in_multi_threads()\n return \"ok\"\n\n assert ray.get(run_tests_in_worker.remote()) == \"ok\"\n\n # Test actor that runs background threads.\n @ray.remote\n class MultithreadedActor(object):\n def __init__(self):\n self.lock = threading.Lock()\n self.thread_results = []\n\n def background_thread(self, wait_objects):\n try:\n # Test wait\n ready, _ = ray.wait(\n wait_objects,\n num_returns=len(wait_objects),\n timeout=1000.0,\n )\n assert len(ready) == len(wait_objects)\n for _ in range(20):\n num = 10\n # Test remote call\n results = [echo.remote(i) for i in range(num)]\n assert ray.get(results) == list(range(num))\n # Test put and get\n objects = [ray.put(i) for i in range(num)]\n assert ray.get(objects) == list(range(num))\n time.sleep(random.randint(0, 10) / 1000.0)\n except Exception as e:\n with self.lock:\n 
                    self.thread_results.append(e)\n            else:\n                with self.lock:\n                    self.thread_results.append(\"ok\")\n\n        def spawn(self):\n            wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]\n            self.threads = [\n                threading.Thread(\n                    target=self.background_thread, args=(wait_objects, ))\n                for _ in range(20)\n            ]\n            [thread.start() for thread in self.threads]\n\n        def join(self):\n            [thread.join() for thread in self.threads]\n            assert self.thread_results == [\"ok\"] * len(self.threads)\n            return \"ok\"\n\n    actor = MultithreadedActor.remote()\n    actor.spawn.remote()\n    assert ray.get(actor.join.remote()) == \"ok\"\n\n\ndef test_free_objects_multi_node(ray_start_cluster):\n    # This test will do the following:\n    # 1. Create 3 raylets that each hold an actor.\n    # 2. Each actor creates an object which is the deletion target.\n    # 3. Wait 0.1 second for the objects to be deleted.\n    # 4. Check that the deletion targets have been deleted.\n    # Caution: if remote functions are used instead of actor methods,\n    # one raylet may create more than one worker to execute the\n    # tasks, so the flushing operations may be executed in different\n    # workers and the plasma client holding the deletion target\n    # may not be flushed.\n    cluster = ray_start_cluster\n    config = json.dumps({\"object_manager_repeated_push_delay_ms\": 1000})\n    for i in range(3):\n        cluster.add_node(\n            num_cpus=1,\n            resources={\"Custom{}\".format(i): 1},\n            _internal_config=config)\n    ray.init(address=cluster.address)\n\n    class RawActor(object):\n        def get(self):\n            return ray.worker.global_worker.node.unique_id\n\n    ActorOnNode0 = ray.remote(resources={\"Custom0\": 1})(RawActor)\n    ActorOnNode1 = ray.remote(resources={\"Custom1\": 1})(RawActor)\n    ActorOnNode2 = ray.remote(resources={\"Custom2\": 1})(RawActor)\n\n    def create(actors):\n        a = actors[0].get.remote()\n        b = actors[1].get.remote()\n        c = actors[2].get.remote()\n        (l1, l2) = ray.wait([a, b, c], num_returns=3)\n        assert len(l1) == 3\n        assert len(l2) == 0\n        return (a, b, c)\n\n    def run_one_test(actors, local_only, delete_creating_tasks):\n        (a, b, c) = create(actors)\n        # The three objects should be generated on different object stores.\n        assert ray.get(a) != ray.get(b)\n        assert ray.get(a) != ray.get(c)\n        assert ray.get(c) != ray.get(b)\n        ray.internal.free(\n            [a, b, c],\n            local_only=local_only,\n            delete_creating_tasks=delete_creating_tasks)\n        # Wait for the objects to be deleted.\n        time.sleep(0.1)\n        return (a, b, c)\n\n    actors = [\n        ActorOnNode0.remote(),\n        ActorOnNode1.remote(),\n        ActorOnNode2.remote()\n    ]\n    # Case 1: run this local_only=False. All 3 objects will be deleted.\n    (a, b, c) = run_one_test(actors, False, False)\n    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)\n    # All the objects are deleted.\n    assert len(l1) == 0\n    assert len(l2) == 3\n    # Case 2: run this local_only=True. 
Only 1 object will be deleted.\n (a, b, c) = run_one_test(actors, True, False)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)\n # One object is deleted and 2 objects are not.\n assert len(l1) == 2\n assert len(l2) == 1\n # The deleted object will have the same store with the driver.\n local_return = ray.worker.global_worker.node.unique_id\n for object_id in l1:\n assert ray.get(object_id) != local_return\n\n # Case3: These cases test the deleting creating tasks for the object.\n (a, b, c) = run_one_test(actors, False, False)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() in task_table\n\n (a, b, c) = run_one_test(actors, False, True)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() not in task_table\n\n\ndef test_local_mode(shutdown_only):\n @ray.remote\n def local_mode_f():\n return np.array([0, 0])\n\n @ray.remote\n def local_mode_g(x):\n x[0] = 1\n return x\n\n ray.init(local_mode=True)\n\n @ray.remote\n def f():\n return np.ones([3, 4, 5])\n\n xref = f.remote()\n # Remote functions should return ObjectIDs.\n assert isinstance(xref, ray.ObjectID)\n assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))\n y = np.random.normal(size=[11, 12])\n # Check that ray.get(ray.put) is the identity.\n assert np.alltrue(y == ray.get(ray.put(y)))\n\n # Make sure objects are immutable, this example is why we need to copy\n # arguments before passing them into remote functions in python mode\n aref = local_mode_f.remote()\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n bref = local_mode_g.remote(ray.get(aref))\n # Make sure local_mode_g does not mutate aref.\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n assert np.alltrue(ray.get(bref) == np.array([1, 0]))\n\n # wait should return the first num_returns values passed in as the\n # first list and the remaining values as the second list\n num_returns = 5\n object_ids = [ray.put(i) for i in range(20)]\n ready, remaining = ray.wait(\n object_ids, num_returns=num_returns, timeout=None)\n assert ready == object_ids[:num_returns]\n assert remaining == object_ids[num_returns:]\n\n # Check that ray.put() and ray.internal.free() work in local mode.\n\n v1 = np.ones(10)\n v2 = np.zeros(10)\n\n k1 = ray.put(v1)\n assert np.alltrue(v1 == ray.get(k1))\n k2 = ray.put(v2)\n assert np.alltrue(v2 == ray.get(k2))\n\n ray.internal.free([k1, k2])\n with pytest.raises(Exception):\n ray.get(k1)\n with pytest.raises(Exception):\n ray.get(k2)\n\n # Should fail silently.\n ray.internal.free([k1, k2])\n\n # Test actors in LOCAL_MODE.\n\n @ray.remote\n class LocalModeTestClass(object):\n def __init__(self, array):\n self.array = array\n\n def set_array(self, array):\n self.array = array\n\n def get_array(self):\n return self.array\n\n def modify_and_set_array(self, array):\n array[0] = -1\n self.array = array\n\n @ray.method(num_return_vals=3)\n def returns_multiple(self):\n return 1, 2, 3\n\n test_actor = LocalModeTestClass.remote(np.arange(10))\n obj = test_actor.get_array.remote()\n assert isinstance(obj, ray.ObjectID)\n assert np.alltrue(ray.get(obj) == np.arange(10))\n\n test_array = np.arange(10)\n # Remote actor functions should not mutate arguments\n test_actor.modify_and_set_array.remote(test_array)\n assert np.alltrue(test_array == np.arange(10))\n # Remote actor functions should keep state\n test_array[0] = -1\n assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))\n\n # Check that actor handles work in local mode.\n\n 
@ray.remote\n def use_actor_handle(handle):\n array = np.ones(10)\n handle.set_array.remote(array)\n assert np.alltrue(array == ray.get(handle.get_array.remote()))\n\n ray.get(use_actor_handle.remote(test_actor))\n\n # Check that exceptions are deferred until ray.get().\n\n exception_str = \"test_basic remote task exception\"\n\n @ray.remote\n def throws():\n raise Exception(exception_str)\n\n obj = throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj)\n\n # Check that multiple return values are handled properly.\n\n @ray.remote(num_return_vals=3)\n def returns_multiple():\n return 1, 2, 3\n\n obj1, obj2, obj3 = returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n obj1, obj2, obj3 = test_actor.returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n @ray.remote(num_return_vals=2)\n def returns_multiple_throws():\n raise Exception(exception_str)\n\n obj1, obj2 = returns_multiple_throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj)\n ray.get(obj1)\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj2)\n\n # Check that Actors are not overwritten by remote calls from different\n # classes.\n @ray.remote\n class RemoteActor1(object):\n def __init__(self):\n pass\n\n def function1(self):\n return 0\n\n @ray.remote\n class RemoteActor2(object):\n def __init__(self):\n pass\n\n def function2(self):\n return 1\n\n actor1 = RemoteActor1.remote()\n _ = RemoteActor2.remote()\n assert ray.get(actor1.function1.remote()) == 0\n\n # Test passing ObjectIDs.\n @ray.remote\n def direct_dep(input):\n return input\n\n @ray.remote\n def indirect_dep(input):\n return ray.get(direct_dep.remote(input[0]))\n\n assert ray.get(indirect_dep.remote([\"hello\"])) == \"hello\"\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 2\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - 
start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n @ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 2\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 10\n ray.init(num_cpus=10, num_gpus=num_gpus)\n\n def get_gpu_ids(num_gpus_per_worker):\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == num_gpus_per_worker\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))\n f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))\n f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))\n f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))\n f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))\n\n # Wait for all workers to start up.\n @ray.remote\n def f():\n time.sleep(0.1)\n return os.getpid()\n\n start_time = time.time()\n while True:\n if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:\n break\n if time.time() > start_time + 10:\n raise RayTestTimeoutException(\n \"Timed out while waiting for workers to start \"\n \"up.\")\n\n list_of_ids = ray.get([f0.remote() for _ in range(10)])\n assert list_of_ids == 10 * [[]]\n\n list_of_ids = ray.get([f1.remote() for _ in range(10)])\n set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}\n assert set_of_ids == {(i, ) for i in range(10)}\n\n list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])\n all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n assert set(all_ids) == set(range(10))\n\n # There are only 10 GPUs, and each task uses 5 GPUs, so there should only\n # be 2 tasks scheduled at a given time.\n t1 = time.time()\n ray.get([f5.remote() for _ in range(20)])\n assert time.time() - t1 >= 10 * 0.1\n\n # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n @ray.remote\n class Actor0(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n 
assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n @ray.remote(num_gpus=1)\n class Actor1(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n a0 = Actor0.remote()\n ray.get(a0.test.remote())\n\n a1 = Actor1.remote()\n ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n ray.init(num_cpus=0)\n\n # We should be able to execute a task that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n def f():\n return 1\n\n ray.get(f.remote())\n\n # We should be able to create an actor that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n class Actor(object):\n def method(self):\n pass\n\n a = Actor.remote()\n x = a.method.remote()\n ray.get(x)\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=2)\n ray.init(address=cluster.address)\n\n node_id = ray.worker.global_worker.node.unique_id\n\n @ray.remote\n class Foo(object):\n def method(self):\n return ray.worker.global_worker.node.unique_id\n\n # Make sure tasks and actors run on the remote raylet.\n a = Foo.remote()\n assert ray.get(a.method.remote()) != node_id\n\n\ndef test_fractional_resources(shutdown_only):\n ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n @ray.remote(num_gpus=0.5)\n class Foo1(object):\n def method(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n return gpu_ids[0]\n\n foos = [Foo1.remote() for _ in range(6)]\n gpu_ids = ray.get([f.method.remote() for f in foos])\n for i in range(3):\n assert gpu_ids.count(i) == 2\n del foos\n\n @ray.remote\n class Foo2(object):\n def method(self):\n pass\n\n # Create an actor that requires 0.7 of the custom resource.\n f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ray.get(f1.method.remote())\n # Make sure that we cannot create an actor that requires 0.7 of the\n # custom resource. 
TODO(rkn): Re-enable this once ray.wait is\n    # implemented.\n    f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n    assert len(ready) == 0\n    # Make sure we can start an actor that requires only 0.3 of the custom\n    # resource.\n    f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n    ray.get(f3.method.remote())\n\n    del f1, f3\n\n    # Make sure that we get exceptions if we submit tasks that require a\n    # fractional number of resources greater than 1.\n\n    @ray.remote(num_cpus=1.5)\n    def test():\n        pass\n\n    with pytest.raises(ValueError):\n        test.remote()\n\n    with pytest.raises(ValueError):\n        Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_raylets(ray_start_cluster):\n    # This test will define a bunch of tasks that can only be assigned to\n    # specific raylets, and we will check that they are assigned\n    # to the correct raylets.\n    cluster = ray_start_cluster\n    cluster.add_node(num_cpus=11, num_gpus=0)\n    cluster.add_node(num_cpus=5, num_gpus=5)\n    cluster.add_node(num_cpus=10, num_gpus=1)\n    ray.init(address=cluster.address)\n    cluster.wait_for_nodes()\n\n    # Define a bunch of remote functions that all return the socket name of\n    # the plasma store. Since there is a one-to-one correspondence between\n    # plasma stores and raylets (at least right now), this can be\n    # used to identify which raylet the task was assigned to.\n\n    # This must be run on the zeroth raylet.\n    @ray.remote(num_cpus=11)\n    def run_on_0():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    # This must be run on the first raylet.\n    @ray.remote(num_gpus=2)\n    def run_on_1():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    # This must be run on the second raylet.\n    @ray.remote(num_cpus=6, num_gpus=1)\n    def run_on_2():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    # This can be run anywhere.\n    @ray.remote(num_cpus=0, num_gpus=0)\n    def run_on_0_1_2():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    # This must be run on the first or second raylet.\n    @ray.remote(num_gpus=1)\n    def run_on_1_2():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    # This must be run on the zeroth or second raylet.\n    @ray.remote(num_cpus=8)\n    def run_on_0_2():\n        return ray.worker.global_worker.node.plasma_store_socket_name\n\n    def run_lots_of_tasks():\n        names = []\n        results = []\n        for i in range(100):\n            index = np.random.randint(6)\n            if index == 0:\n                names.append(\"run_on_0\")\n                results.append(run_on_0.remote())\n            elif index == 1:\n                names.append(\"run_on_1\")\n                results.append(run_on_1.remote())\n            elif index == 2:\n                names.append(\"run_on_2\")\n                results.append(run_on_2.remote())\n            elif index == 3:\n                names.append(\"run_on_0_1_2\")\n                results.append(run_on_0_1_2.remote())\n            elif index == 4:\n                names.append(\"run_on_1_2\")\n                results.append(run_on_1_2.remote())\n            elif index == 5:\n                names.append(\"run_on_0_2\")\n                results.append(run_on_0_2.remote())\n        return names, results\n\n    client_table = ray.nodes()\n    store_names = []\n    store_names += [\n        client[\"ObjectStoreSocketName\"] for client in client_table\n        if client[\"Resources\"].get(\"GPU\", 0) == 0\n    ]\n    store_names += [\n        client[\"ObjectStoreSocketName\"] for client in client_table\n        if client[\"Resources\"].get(\"GPU\", 0) == 5\n    ]\n    store_names += [\n        client[\"ObjectStoreSocketName\"] for client in client_table\n        if client[\"Resources\"].get(\"GPU\", 0) == 1\n    ]\n    assert len(store_names) == 3\n\n    def validate_names_and_results(names, 
results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 0})\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource\": 1})\n def h():\n ray.get([f.remote() for _ in range(5)])\n return ray.worker.global_worker.node.unique_id\n\n # The f tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n\n node_id = ray.worker.global_worker.node.unique_id\n\n # The g tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([g.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != node_id\n\n # Make sure that resource bookkeeping works when a task that uses a\n # custom resources gets blocked.\n ray.get([h.remote() for _ in range(5)])\n\n\ndef test_node_id_resource(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3)\n cluster.add_node(num_cpus=3)\n ray.init(address=cluster.address)\n\n local_node = ray.state.current_node_id()\n\n # Note that these will have the same IP in the test cluster\n assert len(ray.state.node_ids()) == 2\n assert local_node in ray.state.node_ids()\n\n @ray.remote(resources={local_node: 1})\n def f():\n return ray.state.current_node_id()\n\n # Check the node id resource is automatically usable for scheduling.\n assert ray.get(f.remote()) == ray.state.current_node_id()\n\n\ndef test_two_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 1,\n \"CustomResource2\": 2\n })\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 3,\n \"CustomResource2\": 4\n })\n ray.init(address=cluster.address)\n\n @ray.remote(resources={\"CustomResource1\": 1})\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource2\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.node.unique_id\n\n @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n def h():\n time.sleep(0.001)\n 
        return ray.worker.global_worker.node.unique_id\n\n    @ray.remote(resources={\"CustomResource1\": 4})\n    def j():\n        time.sleep(0.001)\n        return ray.worker.global_worker.node.unique_id\n\n    @ray.remote(resources={\"CustomResource3\": 1})\n    def k():\n        time.sleep(0.001)\n        return ray.worker.global_worker.node.unique_id\n\n    # The f and g tasks should be scheduled on both raylets.\n    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n    assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2\n\n    node_id = ray.worker.global_worker.node.unique_id\n\n    # The h tasks should be scheduled only on the second raylet.\n    raylet_ids = set(ray.get([h.remote() for _ in range(50)]))\n    assert len(raylet_ids) == 1\n    assert list(raylet_ids)[0] != node_id\n\n    # Make sure that tasks with unsatisfied custom resource requirements do\n    # not get scheduled.\n    ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n    assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n    num_custom_resources = 10000\n    total_resources = {\n        str(i): np.random.randint(1, 7)\n        for i in range(num_custom_resources)\n    }\n    ray.init(num_cpus=5, resources=total_resources)\n\n    def f():\n        return 1\n\n    remote_functions = []\n    for _ in range(20):\n        num_resources = np.random.randint(0, num_custom_resources + 1)\n        permuted_resources = np.random.permutation(\n            num_custom_resources)[:num_resources]\n        random_resources = {\n            str(i): total_resources[str(i)]\n            for i in permuted_resources\n        }\n        remote_function = ray.remote(resources=random_resources)(f)\n        remote_functions.append(remote_function)\n\n    remote_functions.append(ray.remote(f))\n    remote_functions.append(ray.remote(resources=total_resources)(f))\n\n    results = []\n    for remote_function in remote_functions:\n        results.append(remote_function.remote())\n        results.append(remote_function.remote())\n        results.append(remote_function.remote())\n\n    ray.get(results)\n\n\n# TODO: 5 retry attempts may be too little for Travis and we may need to\n# increase it if this test begins to be flaky on Travis.\ndef test_zero_capacity_deletion_semantics(shutdown_only):\n    ray.init(num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})\n\n    def test():\n        resources = ray.available_resources()\n        MAX_RETRY_ATTEMPTS = 5\n        retry_count = 0\n\n        del resources[\"memory\"]\n        del resources[\"object_store_memory\"]\n        for key in list(resources.keys()):\n            if key.startswith(\"node:\"):\n                del resources[key]\n\n        while resources and retry_count < MAX_RETRY_ATTEMPTS:\n            time.sleep(0.1)\n            resources = ray.available_resources()\n            retry_count += 1\n\n        if retry_count >= MAX_RETRY_ATTEMPTS:\n            raise RuntimeError(\n                \"Resources were available even after five retries.\", resources)\n\n        return resources\n\n    function = ray.remote(\n        num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})(test)\n    cluster_resources = ray.get(function.remote())\n\n    # All cluster resources should be utilized and\n    # cluster_resources must be empty\n    assert cluster_resources == {}\n\n\[email protected]\ndef save_gpu_ids_shutdown_only():\n    # Record the current value of this environment variable so that we can\n    # reset it after the test.\n    original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n    yield None\n\n    # The code after the yield will run as teardown code.\n    ray.shutdown()\n    # Reset the environment variable.\n    if original_gpu_ids is not None:\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n    else:\n        del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n
allowed_gpu_ids = [4, 5, 6]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in allowed_gpu_ids])\n ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=1)\n def f():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert gpu_ids[0] in allowed_gpu_ids\n\n @ray.remote(num_gpus=2)\n def g():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert gpu_ids[0] in allowed_gpu_ids\n assert gpu_ids[1] in allowed_gpu_ids\n\n ray.get([f.remote() for _ in range(100)])\n ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(ray_start_regular):\n @ray.remote\n def f(i, j):\n return (i, j)\n\n @ray.remote\n def g(i):\n # Each instance of g submits and blocks on the result of another\n # remote task.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.get(object_ids)\n\n @ray.remote\n def h(i):\n # Each instance of g submits and blocks on the result of another\n # remote task using ray.wait.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.wait(object_ids, num_returns=len(object_ids))\n\n ray.get([h.remote(i) for i in range(4)])\n\n @ray.remote\n def _sleep(i):\n time.sleep(0.01)\n return (i)\n\n @ray.remote\n def sleep():\n # Each instance of sleep submits and blocks on the result of\n # another remote task, which takes some time to execute.\n ray.get([_sleep.remote(i) for i in range(10)])\n\n ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(ray_start_regular):\n @ray.remote(max_calls=1)\n def f():\n return os.getpid()\n\n pid = ray.get(f.remote())\n ray.tests.utils.wait_for_pid_to_exit(pid)\n\n @ray.remote(max_calls=2)\n def f():\n return os.getpid()\n\n pid1 = ray.get(f.remote())\n pid2 = ray.get(f.remote())\n assert pid1 == pid2\n ray.tests.utils.wait_for_pid_to_exit(pid1)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.node.unique_id\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(address=cluster.address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.node.unique_id\n\n # This object will be local to one of the raylets. 
Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.tasks()) >= num_tasks:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\"Timed out while waiting for global state.\")\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.tasks()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n assert ray.cluster_resources()[\"CPU\"] == 5\n assert ray.cluster_resources()[\"GPU\"] == 3\n assert ray.cluster_resources()[\"CustomResource\"] == 1\n\n assert ray.objects() == {}\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.tasks()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_unique_id_hex = ray.UniqueID.nil().hex()\n nil_actor_id_hex = ray.ActorID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"FunctionID\"] == nil_unique_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.tasks()\n assert len(task_table) == 1 + 1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == [\n signature.DUMMY_TYPE, 1, signature.DUMMY_TYPE, \"hi\",\n signature.DUMMY_TYPE, x_id\n ]\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n\n assert task_table[task_id] == ray.tasks(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.objects()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] 
is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise RayTestTimeoutException(\n \"Timed out while waiting for object table to \"\n \"update.\")\n\n object_table = ray.objects()\n assert len(object_table) == 2\n\n assert object_table[x_id] == ray.objects(x_id)\n object_table_entry = ray.objects(result_id)\n assert object_table[result_id] == object_table_entry\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError(object):\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n if sys.version_info >= (3, 0):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n else:\n import cStringIO\n self.output_buffer = cStringIO.StringIO()\n self.error_buffer = cStringIO.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. 
Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray._get_runtime_context().current_driver_id\n\n # in worker\n @ray.remote\n def f():\n return ray._get_runtime_context().current_driver_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID.from_random()\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=int(10**8))\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.core_worker.object_exists(x_id)\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName(object):\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:f()\"\n\n @ray.remote\n def unique_1():\n assert setproctitle.getproctitle(\n ) == \"ray_worker:ray.tests.test_basic.unique_1()\"\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n 
driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID.from_random().hex()\n temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\[email protected]\ndef echo(x):\n return x\n\n\[email protected]\nclass WithConstructor(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\[email protected]\nclass WithoutConstructor(object):\n def set_data(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\nclass BaseClass(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\[email protected]\nclass DerivedClass(BaseClass):\n def __init__(self, data):\n # Due to different behaviors of super in Python 2 and Python 3,\n # we use BaseClass directly here.\n BaseClass.__init__(self, data)\n\n\ndef test_load_code_from_local(shutdown_only):\n ray.init(load_code_from_local=True, num_cpus=4)\n 
message = \"foo\"\n # Test normal function.\n assert ray.get(echo.remote(message)) == message\n # Test actor class with constructor.\n actor = WithConstructor.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test actor class without constructor.\n actor = WithoutConstructor.remote()\n actor.set_data.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test derived actor class.\n actor = DerivedClass.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test using ray.remote decorator on raw classes.\n base_actor_class = ray.remote(num_cpus=1)(BaseClass)\n base_actor = base_actor_class.remote(message)\n assert ray.get(base_actor.get_data.remote()) == message\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_put_pins_object(ray_start_object_store_memory):\n x_id = ray.put(\"HI\")\n x_copy = ray.ObjectID(x_id.binary())\n assert ray.get(x_copy) == \"HI\"\n\n # x cannot be evicted since x_id pins it\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n assert ray.get(x_id) == \"HI\"\n assert ray.get(x_copy) == \"HI\"\n\n # now it can be evicted since x_id pins it but x_copy does not\n del x_id\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(x_copy)\n\n # weakref put\n y_id = ray.put(\"HI\", weakref=True)\n for _ in range(10):\n ray.put(np.zeros(10 * 1024 * 1024))\n with pytest.raises(ray.exceptions.UnreconstructableError):\n ray.get(y_id)\n\n @ray.remote\n def check_no_buffer_ref(x):\n assert x[0].get_buffer_ref() is None\n\n z_id = ray.put(\"HI\")\n assert z_id.get_buffer_ref() is not None\n ray.get(check_no_buffer_ref.remote([z_id]))\n\n\[email protected](\n \"ray_start_object_store_memory\", [150 * 1024 * 1024], indirect=True)\ndef test_redis_lru_with_set(ray_start_object_store_memory):\n x = np.zeros(8 * 10**7, dtype=np.uint8)\n x_id = ray.put(x, weakref=True)\n\n # Remove the object from the object table to simulate Redis LRU eviction.\n removed = False\n start_time = time.time()\n while time.time() < start_time + 10:\n if ray.state.state.redis_clients[0].delete(b\"OBJECT\" +\n x_id.binary()) == 1:\n removed = True\n break\n assert removed\n\n # Now evict the object from the object store.\n ray.put(x) # This should not crash.\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_ids, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor(object):\n def 
method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n\n\ndef test_invalid_unicode_in_worker_log(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n # Wait till first worker log file is created.\n while True:\n log_file_paths = glob.glob(\"{}/worker*.out\".format(logs_dir))\n if len(log_file_paths) == 0:\n time.sleep(0.2)\n else:\n break\n\n with open(log_file_paths[0], \"wb\") as f:\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.write(b\"\\xe5abc\\nline2\\nline3\\n\")\n f.flush()\n\n # Wait till the log monitor reads the file.\n time.sleep(1.0)\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n\n\[email protected](reason=\"This test is too expensive to run.\")\ndef test_move_log_files_to_old(shutdown_only):\n info = ray.init(num_cpus=1)\n\n logs_dir = os.path.join(info[\"session_dir\"], \"logs\")\n\n @ray.remote\n class Actor(object):\n def f(self):\n print(\"function f finished\")\n\n # First create a temporary actor.\n actors = [\n Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)\n ]\n ray.get([a.f.remote() for a in actors])\n\n # Make sure no log files are in the \"old\" directory before the actors\n # are killed.\n assert len(glob.glob(\"{}/old/worker*.out\".format(logs_dir))) == 0\n\n # Now kill the actors so the files get moved to logs/old/.\n [a.__ray_terminate__.remote() for a in actors]\n\n while True:\n log_file_paths = glob.glob(\"{}/old/worker*.out\".format(logs_dir))\n if len(log_file_paths) > 0:\n with open(log_file_paths[0], \"r\") as f:\n assert \"function f finished\\n\" in f.readlines()\n break\n\n # Make sure that nothing has died.\n assert ray.services.remaining_processes_alive()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.uint32",
"numpy.arange",
"numpy.uint8",
"numpy.int32",
"numpy.int8",
"pandas.DataFrame",
"numpy.ones",
"numpy.int64",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.uint64",
"numpy.float64",
"numpy.float32",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
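The `test_decorated_function` case in the code field above exercises Ray's `__ray_invocation_decorator__` hook, which lets a wrapper rewrite the packed (args, kwargs) of a call before it is dispatched. A minimal plain-Python sketch of that pattern, with no Ray dependency; `plain_invoke`, `new_invoke`, and `decorated` are illustrative names, not Ray API:

def function_invocation_decorator(invoke):
    # 'invoke' receives the packed (args, kwargs) of one call; the wrapper
    # may rewrite both before delegating, mirroring the test above.
    def new_invoke(args, kwargs):
        return invoke(args[::-1], {"d": 5}), kwargs
    return new_invoke

def plain_invoke(args, kwargs):
    # Stand-in for the remote invocation; unpacks like f(a, b, c, d=None).
    a, b, c = args
    return (a, b, c, kwargs.get("d"))

decorated = function_invocation_decorator(plain_invoke)
result, kwargs = decorated((1, 2, 3), {"d": 4})
assert result == (3, 2, 1, 5)  # positional args reversed, d forced to 5
assert kwargs == {"d": 4}      # original kwargs handed back unchanged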
luansouzasilva31/desafio_icts | [
"3bdebf1cbab0f73b07dc5b96cebd55239cf79a73"
] | [
"categorical_classification/pyscripts/a_preprocessingDataset.py"
] | [
"import os, numpy as np, matplotlib.pyplot as plt\nfrom keras.preprocessing.image import load_img, img_to_array\n\n\ndir_images = '../cats_and_dogs_dataset/images/images'\ndir_save = '../cats_and_dogs_dataset/npz'\nif not os.path.exists(dir_save): os.mkdir(dir_save)\n\n\nnames = os.listdir(dir_images);\nnames = [i for i in names if i.endswith(\".jpg\")]\n# 1. Separar por raças\n# Pega cada palavra até o ultimo \"_\"\ngreed_names = [i[0:i.rindex('_')] for i in names]\n\n# Atribui categorias para cada raça\ncategories = {}; m=0\nfor i in greed_names:\n if i not in categories:\n categories[i]=m\n m+=1\n\n# 2. Importar data por raça\ngreed_images={}\nlabels={}\nfor i in categories:\n greed_images[i] = []; labels[i] = []\n for j in names:\n if i in j:\n img = load_img(dir_images+'/'+j, target_size=(224,224,3))\n img = img_to_array(img)\n greed_images[i].extend([img])\n labels[i].append(categories[i])\n\n# 3. Convertendo as categorias para array\nfor i in greed_images:\n greed_images[i] = np.asarray(greed_images[i])\n labels[i] = np.asarray(labels[i])\n\n# 4. Dividindo dados para treino e teste\np = 0.7 # porcentagem para treino\nq = 0.1 # porcentagem para validação\n\nfirst=True\nfor i in greed_images:\n if first: # é o primeiro loop\n first = False\n images_train = greed_images[i][:int(p*greed_images[i].shape[0])]\n images_val = greed_images[i][int(p*greed_images[i].shape[0]):int((p+q)*greed_images[i].shape[0])]\n images_test = greed_images[i][int((p+q)*greed_images[i].shape[0]):]\n \n labels_train = labels[i][:int(p*labels[i].shape[0])]\n labels_val = labels[i][int(p*labels[i].shape[0]):int((p+q)*labels[i].shape[0])]\n labels_test = labels[i][int((p+q)*labels[i].shape[0]):]\n \n else: # não é o primeiro loop\n images_train = np.concatenate((images_train, greed_images[i][:int(p*greed_images[i].shape[0])]))\n images_val = np.concatenate((images_val, greed_images[i][int(p*greed_images[i].shape[0]):int((p+q)*greed_images[i].shape[0])]))\n images_test = np.concatenate((images_test, greed_images[i][int((p+q)*greed_images[i].shape[0]):]))\n\n labels_train = np.concatenate((labels_train, labels[i][:int(p*labels[i].shape[0])]))\n labels_val = np.concatenate((labels_val, labels[i][int(p*labels[i].shape[0]):int((p+q)*labels[i].shape[0])]))\n labels_test = np.concatenate((labels_test, labels[i][int((p+q)*labels[i].shape[0]):]))\n\n\n# 5. Salvando data\nprint('Salvando data de treino, validação e teste.')\nprint('It will take some minutes...')\nnp.savez_compressed(dir_save + '/images_train', images_train)\nnp.save(dir_save + '/labels_train.npy', labels_train)\n\nnp.savez_compressed(dir_save + '/images_val', images_val)\nnp.save(dir_save + '/labels_val.npy', labels_val)\n\nnp.savez_compressed(dir_save + '/images_test', images_test)\nnp.save(dir_save + '/labels_test.npy', labels_test)\n\nprint('Salvamento concluído!')\n\n"
] | [
[
"numpy.asarray",
"numpy.savez_compressed",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
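The preprocessing script above builds its train/validation/test sets by slicing the first 70%, the next 10%, and the remaining 20% of each breed's images before concatenating, so every split keeps the class balance. A compact sketch of that per-class slicing, assuming `arrays_by_class` maps a class name to a numpy array; all names here are illustrative:

import numpy as np

def split_per_class(arrays_by_class, p=0.7, q=0.1):
    # Slice each class array into train/val/test, then stack the pieces
    # so every split keeps the same class proportions.
    train, val, test = [], [], []
    for arr in arrays_by_class.values():
        n = arr.shape[0]
        i, j = int(p * n), int((p + q) * n)
        train.append(arr[:i])
        val.append(arr[i:j])
        test.append(arr[j:])
    return np.concatenate(train), np.concatenate(val), np.concatenate(test)

data = {"a": np.arange(10), "b": np.arange(100, 120)}
tr, va, te = split_per_class(data)
assert len(tr) == 7 + 14 and len(va) == 1 + 2 and len(te) == 2 + 4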
coastalcph/seq2sparql | [
"96ef08da86860d0802b9bcebac9ccceefde43b4a"
] | [
"sgnr_new.py"
] | [
"\n# -*- coding: utf-8 -*-\n\"\"\"systematicity-experiment-data-generation-v2.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1R7smQachUTswGMFNgsETtb8Zh3nZGIiZ\n\"\"\"\n\nimport json\n# Commented out IPython magic to ensure Python compatibility.\nimport os\nimport pickle\nimport random\nfrom collections import OrderedDict\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom ast import literal_eval\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport cProfile\ncp = cProfile.Profile()\ncp.enable()\n\nimport copy\n\n\n# !pip install snakeviz\n# %load_ext snakeviz\n\n# %load_ext line_profiler\n\n# !ls -l\n# clear_output()\n\n# from google.colab import drive\n# drive.mount('/content/gdrive', force_remount=True)\n# root_dir = \"/content/gdrive/My Drive/School/PhD/Colab Notebooks/\"\n# os.chdir(root_dir)\n# clear_output()\n\n# os.chdir(\"compositionality\")\n\ndef load_json(file_name):\n with open(file_name) as data_file:\n data = json.load(data_file, object_pairs_hook=OrderedDict)\n return data\n\n\ndef save_json(item, file_path):\n with open(file_path, 'w+') as fp:\n json_str = json.dumps(item, indent=4)\n fp.write(json_str)\n\n\ndef generate(thr=0):\n\n verb_classes = pd.read_csv('./annotations/EPIC_100_verb_classes.csv', converters={'instances': literal_eval},\n index_col='id')\n noun_classes = pd.read_csv('./annotations/EPIC_100_noun_classes.csv', converters={'instances': literal_eval},\n index_col='id')\n\n train_labels = pd.read_csv('./annotations/EPIC_100_train.csv', index_col='narration_id')\n labels = pd.concat([train_labels], sort=False)\n\n len(verb_classes), len(noun_classes)\n\n\n def tokenize_multiword(noun, verb, narr):\n\n if \"-\" in verb:\n v1, v2 = verb.split(\"-\")\n narr = narr.replace(v1 + \" \" + v2, v1 + \"-\" + v2)\n if \":\" in noun:\n ns = noun.split(\":\")\n if len(ns) == 2:\n n2, n1 = ns[0], ns[1]\n narr = narr.replace(n1 + \" \" + n2, n1 + \"_\" + n2)\n if len(ns) == 3:\n n3, n2, n1 = ns[0], ns[1], ns[2]\n narr = narr.replace(n1 + \" \" + n2 + \" \" + n3, n1 + \"_\" + n2 + \"_\" + n3)\n\n return narr\n\n pairs = {}\n narration_data = []\n\n source = []\n target = []\n U_ALL = OrderedDict()\n\n for item in labels.video_id.values:\n if item not in pairs:\n pairs[item] = 0\n\n window_size = 3\n\n for k, v in pairs.items():\n video_data = labels[labels['video_id'] == k].sort_values(by=\"start_timestamp\")\n count = 0\n\n nouns = video_data.noun.to_list()\n verbs = video_data.verb.to_list()\n narrations = video_data.narration.to_list()\n\n bad_words = [\"still\", \"continue\"]\n\n for i, (noun, verb, narration) in enumerate(zip(nouns, verbs, narrations)):\n\n if i < len(nouns) - window_size:\n bag = []\n sack = [tokenize_multiword(noun, verb, narration + \" . \")]\n narration_composition_sack = [verb + \"|\" + noun]\n narration_idx = [str(i)]\n skip = False\n\n for j in range(window_size):\n bag.append(nouns[i + j + 1])\n sack.append(tokenize_multiword(nouns[i+j+1], verbs[i+j+1], narrations[i + j + 1]) + \" . 
\")\n narration_composition_sack.append(\"+\" + verbs[i + j + 1] + \"|\" + nouns[i + j + 1])\n narration_idx.append(str(i + j + 1))\n\n if not len(sack) == len(set(sack)): # skip repeating narrations\n continue\n\n for w in bad_words:\n if w in \"\".join(sack):\n skip = True\n\n if skip:\n continue\n\n key = bag.pop()\n bag.append(noun)\n\n if key in bag:\n count = count + 1\n narration_text_tagged = \"\".join(sack)\n\n narration_text_tagged_new = []\n for item in narration_text_tagged.split(\" \"):\n if item in nouns:\n item = item + \"[N\" + str(nouns.index(item)) + \"]\"\n elif item in verbs:\n item = item + \"[V\" + str(verbs.index(item)) + \"]\"\n narration_text_tagged_new.append(item)\n\n narration_text_tagged_text = \" \".join(narration_text_tagged_new)\n narration_composition_keys = \"\".join(\n narration_composition_sack) # TADA: We have the whole compositions for any narration!!!\n narration_data.append(narration_text_tagged_text)\n trg = sack.pop() # + \" . \"\n src = \"\".join(sack) # + \" . \"\n source.append(src)\n target.append(trg)\n\n U_id = k + \"+\" + \"_\".join(\n narration_idx) # in the format of video_id + '+' + narration_index seperated by '_'\n U_ALL[U_id] = narration_composition_keys\n\n pairs[k] = count\n\n verb_noun_compositions = {}\n atom_distribution = OrderedDict()\n compound_distribution = OrderedDict()\n\n all_dict = {}\n\n for i, (k, v) in enumerate(U_ALL.items()):\n\n video_id, narration_idx_str = k.split(\"+\")\n\n compositions_in_narration = v.split(\"+\")\n\n for composition in compositions_in_narration:\n\n verb, noun = composition.split(\"|\")\n\n if verb not in all_dict:\n all_dict[verb] = {}\n else:\n if noun not in all_dict[verb]:\n all_dict[verb][noun] = 1\n else:\n all_dict[verb][noun] = all_dict[verb][noun] + 1\n\n if composition not in verb_noun_compositions:\n verb_noun_compositions[composition] = 1\n else:\n verb_noun_compositions[composition] = verb_noun_compositions[composition] + 1\n\n # ATOM DISTRIBUTION\n if noun not in atom_distribution:\n atom_distribution[noun] = 1\n else:\n atom_distribution[noun] += 1\n\n if verb not in atom_distribution:\n atom_distribution[verb] = 1\n else:\n atom_distribution[verb] += 1\n\n # COMPOUND DISTRIBUTION\n if composition not in compound_distribution:\n compound_distribution[composition] = 1\n else:\n compound_distribution[composition] += 1\n\n save_json(all_dict, \"all_compositions_newest.json\")\n\n \"\"\"**Greedy Fast Approach**\"\"\"\n\n # def zero_division(n, d):\n # return n / d if d else 0\n\n def chernoff_fast(hist1, hist2, alfa):\n \"\"\"\n Measure divergence (or similarity) of the weighted distributions using\n the Chernoff coefficient Cα(P ∥Q) = sum(p^α q^1−α) ∈ [0, 1] (Chung et al., 1989).\n \"\"\"\n\n chernoff_coef = 0.0\n alfa_minus = 1 - alfa\n div_coeff = 1e-32 # to avoid division by zero\n\n total_items_1 = sum(hist1) + div_coeff\n total_items_2 = sum(hist2) + div_coeff\n\n # h1 = set(compress(itertools.count(), hist1))\n # h2 = set(compress(itertools.count(), hist2))\n\n # nonzeros = h1 & h2\n\n # for inx in nonzeros:\n # p = hist1[inx] / total_items_1\n # q = hist2[inx] / total_items_2\n for i, (item1, item2) in enumerate(zip(hist1, hist2)):\n p = item1/total_items_1\n q = item2/total_items_2\n \n chernoff_coef += p ** alfa * q ** alfa_minus\n\n return chernoff_coef\n\n # def get_compound_freq(L, RL):\n # cf = [0] * len(RL)\n #\n # S = set(L)\n # # RL = list(R.keys())\n #\n # for s in S:\n # cf[RL.index(s)] = L.count(s)\n #\n # return cf\n\n def get_compound_freq_tabled(CF, 
compounds):\n\n for compound in compounds:\n CF[compound] += 1\n\n return CF\n\n def get_compound_freq_tabled_remove(CF, compounds):\n\n for compound in compounds:\n CF[compound] -= 1\n\n return CF\n\n # def get_atom_freq(L, RL):\n # atoms = [] # [0] * len(R)\n #\n # for compound in L:\n # atoms.extend(compound.split(\"|\"))\n #\n # af = [0] * len(RL)\n #\n # S = set(atoms)\n #\n # for s in S:\n # af[RL.index(s)] = atoms.count(s)\n #\n # return af\n\n def get_atom_freq_tabled(AF, atoms):\n\n for atom in atoms:\n AF[atom] += 1\n\n return AF\n\n def get_atom_freq_tabled_remove(AF, atoms):\n\n for atom in atoms:\n AF[atom] -= 1\n\n return AF\n\n # def get_divergence(V, W, ADL, CDL, atom_divergence, compound_divergence):\n # V_flat = []\n # W_flat = []\n #\n # for item in V:\n # V_flat.extend(item.split(\"+\"))\n #\n # for item in W:\n # W_flat.extend(item.split(\"+\"))\n #\n # FC_V = get_compound_freq(V_flat, CDL)\n # FC_W = get_compound_freq(W_flat, CDL)\n # FA_V = get_atom_freq(V_flat, ADL)\n # FA_W = get_atom_freq(W_flat, ADL)\n #\n # # DC(V∥W)=1 − C0.1(FC(V)∥FC(W))\n # # DA(V∥W)=1 − C0.5(FA(V)∥FA(W))\n # # According to chernoff coeff.\n # # Cα(P ∥Q) = pα q1−α ∈ [0, 1]\n # chernoff_coef_C = chernoff_fast(FC_V, FC_W, compound_divergence)\n # chernoff_coef_A = chernoff_fast(FA_V, FA_W, atom_divergence)\n # DC_VW = 1.0 - chernoff_coef_C\n # DA_VW = 1.0 - chernoff_coef_A\n #\n # return DA_VW, FA_V, FA_W, DC_VW, FC_V, FC_W\n #\n # def get_divergence_what_if_atom_only(V_flat, W_flat, ADL, atom_divergence):\n # FA_V = get_atom_freq(V_flat, ADL)\n # FA_W = get_atom_freq(W_flat, ADL)\n #\n # # DA(V∥W)=1 − C0.5(FA(V)∥FA(W))\n # # According to chernoff coeff.\n # # Cα(P ∥Q) = pα q1−α ∈ [0, 1]\n # chernoff_coef_A = chernoff_fast(FA_V, FA_W, atom_divergence)\n # DA_VW = 1.0 - chernoff_coef_A\n #\n # return DA_VW, FA_V, FA_W\n\n def get_divergence_what_if_atom_only_tabled(S, AF_table_V, AF_table_W, atoms, atom_divergence):\n # FA_V = get_atom_freq(V_flat, ADL)\n # FA_W = get_atom_freq(W_flat, ADL)\n\n if S == \"V\":\n AF_table_V = get_atom_freq_tabled(AF_table_V, atoms)\n elif S == \"W\":\n AF_table_W = get_atom_freq_tabled(AF_table_W, atoms)\n\n # FA_V = list(AF_table_V.values())\n # FA_W = list(AF_table_W.values())\n\n # DA(V∥W)=1 − C0.5(FA(V)∥FA(W))\n # According to chernoff coeff.\n # Cα(P ∥Q) = pα q1−α ∈ [0, 1]\n chernoff_coef_A = chernoff_fast(list(AF_table_V.values()), list(AF_table_W.values()), atom_divergence)\n DA_VW = 1.0 - chernoff_coef_A\n\n return DA_VW, AF_table_V, AF_table_W\n\n def get_divergence_what_if_compound_only_tabled(S,CF_table_V,CF_table_W, compounds, compound_divergence):\n\n if S == \"V\":\n CF_table_V = get_compound_freq_tabled(CF_table_V, compounds)\n elif S == \"W\":\n CF_table_W = get_compound_freq_tabled(CF_table_W, compounds)\n\n # FC_V = list(CF_table_V.values())\n # FC_W = list(CF_table_W.values())\n\n # DC(V∥W)=1 − C0.1(FC(V)∥FC(W))\n # According to chernoff coeff.\n # Cα(P ∥Q) = pα q1−α ∈ [0, 1]\n chernoff_coef_C = chernoff_fast(list(CF_table_V.values()), list(CF_table_W.values()), compound_divergence)\n DC_VW = 1.0 - chernoff_coef_C\n\n return DC_VW, CF_table_V, CF_table_W\n\n # def get_divergence_what_if_compound_only(V_flat, W_flat, CDL, compound_divergence):\n # FC_V = get_compound_freq(V_flat, CDL)\n # FC_W = get_compound_freq(W_flat, CDL)\n #\n # # DC(V∥W)=1 − C0.1(FC(V)∥FC(W))\n # # According to chernoff coeff.\n # # Cα(P ∥Q) = pα q1−α ∈ [0, 1]\n # chernoff_coef_C = chernoff_fast(FC_V, FC_W, compound_divergence)\n # DC_VW = 1.0 - chernoff_coef_C\n #\n # 
return DC_VW, FC_V, FC_W\n\n def greedy_fast(thr=0):\n # beginning of the greedy algorithm described in the paper\n\n AD = OrderedDict(atom_distribution)\n CD = OrderedDict(compound_distribution)\n\n AF_table_V = OrderedDict()\n CF_table_V = OrderedDict()\n AF_table_W = OrderedDict()\n CF_table_W = OrderedDict()\n\n for k,v in AD.items():\n AF_table_V[k] = 0\n AF_table_W[k] = 0\n\n for k,v in CD.items():\n CF_table_V[k] = 0\n CF_table_W[k] = 0\n\n # ADL = list(AD.keys())\n # CDL = list(CD.keys())\n U = list(U_ALL.values())\n\n atom_divergence = 0.5 # cf. Keysers et al 2020\n compound_divergence = 0.1 # cf. Keysers et al 2020\n\n # DC(V∥W)=1 − C0.1(FC(V)∥FC(W))\n # DA(V∥W)=1 − C0.5(FA(V)∥FA(W))\n\n # To construct such an experiment for a dataset U and a desired combination of atom and compound divergences,\n # we use an iterative greedy algorithm that starts with empty sets V (train) and W (test), and then alternates\n # between adding an example u ∈ U to V or W (while maintaining the desired train/test ratio).\n # At each iteration, the element u is selected such that DC (V ∥W ) and DA (V ∥W ) are kept as closely as\n # possible to the desired values.\n # To reduce the risk of being stuck in a local optimum, we also allow removing examples at certain iterations.\n\n quit = 0 # termination counter\n V = [] # train split\n W = [] # test split\n V_idx = []\n W_idx = []\n\n V_dict = OrderedDict()\n W_dict = OrderedDict()\n\n # Splits = {'V': V_dict, 'W': W_dict}\n\n # bins_C = [x for x in range(0, len(CD))]\n # bins_A = [x for x in range(0, len(AD))]\n U_small = list(U[0:])\n\n DA_VW = 0.0\n i = 0\n inx_U = [x for x in range(0, len(U_small))]\n\n atoms_dict_inx = OrderedDict()\n compounds_dict_inx = OrderedDict()\n\n for u_ind in inx_U:\n u = U[u_ind]\n\n compounds = u.split(\"+\")\n compounds_dict_inx[u_ind] = compounds\n atom_list = []\n\n for compound in compounds:\n atom_list.extend(compound.split(\"|\"))\n\n atoms_dict_inx[u_ind] = list(atom_list)\n\n # V = {} # Train\n # W = {} # Test\n # Until all samples u \\in U are assigned to V or W:\n # 1. Pick which split S to add a sample to next.\n # 2. For each u \\in U that wasn't assigned yet:\n # 2a. if S = V: V' = V \\union u, W' = W, else V' = V, W' = W \\union u\n # 2b. Compute potential atom divergence D_A(V' || W') and compound divergence D_C(V' || W')\n # 3. 
Select the u which is best (D_A and D_C close to target)\n\n def add_to_split(S, V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, atoms, compounds, u, ind, inx_U):\n\n if S == \"V\":\n V.append(u) # train split\n V_idx.append(ind)\n V = list(set(V))\n AF_table_V = get_atom_freq_tabled(AF_table_V, atoms)\n CF_table_V = get_compound_freq_tabled(CF_table_V, compounds)\n\n else: # W:\n W.append(u) # test split\n W_idx.append(ind)\n W = list(set(W))\n AF_table_W = get_atom_freq_tabled(AF_table_W, atoms)\n CF_table_W = get_compound_freq_tabled(CF_table_W, compounds)\n\n inx_U.remove(ind) # remove what we've added so that we don't add it to another split!\n\n return V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U\n\n # def add_to_split_new(S, Splits, u, ind, inx_U):\n #\n # # Splits[S]\n #\n # if S == \"V\":\n # V.append(u) # train split\n # V_idx.append(ind)\n # V = list(set(V))\n # else: # W:\n # W.append(u) # test split\n # W_idx.append(ind)\n # W = list(set(W))\n #\n # inx_U.remove(ind) # remove what we've added so that we don't add it to another split!\n\n def remove_from_split(S, V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V,CF_table_W,atoms, compounds, u, ind, inx_U):\n\n if S == \"V\":\n V.remove(u) # train split\n V_idx.remove(ind)\n V = list(set(V))\n AF_table_V = get_atom_freq_tabled_remove(AF_table_V, atoms)\n CF_table_V = get_compound_freq_tabled_remove(CF_table_V, compounds)\n else: # W:\n W.remove(u) # test split\n W_idx.remove(ind)\n W = list(set(W))\n AF_table_W = get_atom_freq_tabled_remove(AF_table_W, atoms)\n CF_table_W = get_compound_freq_tabled_remove(CF_table_W, compounds)\n\n inx_U.append(ind) # add what we've removed so that we don't remove it from another split!\n\n return V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V,CF_table_W, inx_U\n\n\n # ===============================================================================================\n\n # read files from directory\n # idx, keys, src, trg (train/test/val)\n _train_idx = load_json(\"8500/train_idx.json\")\n _test_idx = load_json(\"8500/test_idx.json\")\n _val_idx = load_json(\"8500/val_idx.json\")\n # _train_keys = json.loads(\"9000/train_keys.json\")\n # _test_keys = json.loads(\"9000/test_keys.json\")\n # _val_keys = json.loads(\"9000/val_keys.json\")\n\n S = \"V\"\n\n for ind in _train_idx:\n u = U[ind]\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U = add_to_split(S, V, W,\n V_idx, W_idx,\n AF_table_V,\n AF_table_W,\n CF_table_V,\n CF_table_W,\n atoms_dict_inx[\n ind],\n compounds_dict_inx[\n ind],\n u,\n ind,\n inx_U)\n i += 1\n print(\"%s Adding %s \\t\\t to %s \\t Remaining: %s\" % (i, u, S, len(inx_U)))\n\n S = \"W\"\n\n for ind in _test_idx:\n u = U[ind]\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U = add_to_split(S, V, W,\n V_idx, W_idx,\n AF_table_V,\n AF_table_W,\n CF_table_V,\n CF_table_W,\n atoms_dict_inx[\n ind],\n compounds_dict_inx[\n ind],\n u,\n ind,\n inx_U)\n i += 1\n print(\"%s Adding %s \\t\\t to %s \\t Remaining: %s\" % (i, u, S, len(inx_U)))\n\n S = \"W\"\n\n for ind in _val_idx:\n u = U[ind]\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U = add_to_split(S, V, W,\n V_idx, W_idx,\n AF_table_V,\n AF_table_W,\n CF_table_V,\n CF_table_W,\n atoms_dict_inx[\n ind],\n compounds_dict_inx[\n ind],\n u,\n ind,\n inx_U)\n i += 1\n print(\"%s Adding %s \\t\\t to %s \\t Remaining: %s\" % (i, u, S, len(inx_U)))\n\n # 
===============================================================================================\n\n while len(inx_U) > 0: # continue until no item left to allocate\n\n # STEP 1 : Determine which split S to add u\n probability = random.random()\n\n if probability > 0.5: # add u to V\n S = \"V\"\n else: # add u to W\n S = \"W\"\n\n if i == 0: # if it is the first item pick randomly\n ind = random.choice(inx_U)\n u = U[ind]\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U = add_to_split(S, V, W,\n V_idx, W_idx,\n AF_table_V,\n AF_table_W,\n CF_table_V,\n CF_table_W,\n atoms_dict_inx[ind],\n compounds_dict_inx[ind],\n u,\n ind,\n inx_U)\n i += 1\n print(\"%s Adding %s \\t\\t to %s \\t Remaining: %s\" % (i, u, S,len(inx_U)))\n else:\n\n # STEP 2. For each u \\in U that wasn't assigned yet:\n\n divergence_scores_C = OrderedDict()\n divergence_scores_A = OrderedDict()\n\n for u_ind in inx_U:\n\n # u_tmp = U[u_ind]\n # V_tmp = list(V)\n # W_tmp = list(W)\n\n # ----------------\n AF_table_V_tmp = OrderedDict(AF_table_V)\n AF_table_W_tmp = OrderedDict(AF_table_W)\n\n DA_VW, _AF_table_V, _AF_table_W = get_divergence_what_if_atom_only_tabled(S, AF_table_V_tmp,\n AF_table_W_tmp,\n atoms_dict_inx[u_ind],\n atom_divergence) # train split\n \n divergence_scores_A[u_ind] = DA_VW\n\n '''\n\n DA_VW, FA_V, FA_W, DC_VW, FC_V, FC_W = get_divergence_what_if(V_flat, W_flat, ADL, CDL,\n atom_divergence,\n compound_divergence)\n\n divergence_scores_C[u_ind] = DC_VW # {\"DC_VW\": DC_VW, \"DA_VW\": DA_VW}\n divergence_scores_A[u_ind] = DA_VW\n divergence_scores_AC_ratio[u_ind] = DA_VW / DC_VW\n '''\n\n filtered_inx_U_by_A = [k for k,v in divergence_scores_A.items() if v <= 0.02]\n\n if len(filtered_inx_U_by_A) == 0:\n best_u_ind_A = min(divergence_scores_A, key=divergence_scores_A.get)\n filtered_inx_U_by_A.append(best_u_ind_A)\n # elif len(filtered_inx_U_by_A) > 10:\n # filtered_inx_U_by_A = random.sample(filtered_inx_U_by_A, 10)\n # print(filtered_inx_U_by_A, divergence_scores_A[filtered_inx_U_by_A[0]])\n # else:\n #print(len(filtered_inx_U_by_A))\n\n for u_ind in inx_U:\n\n if u_ind in filtered_inx_U_by_A:\n # u_tmp = U[u_ind]\n\n CF_table_V_tmp = OrderedDict(CF_table_V)\n CF_table_W_tmp = OrderedDict(CF_table_W)\n\n DC_VW, _CF_table_V, _CF_table_W = get_divergence_what_if_compound_only_tabled(S,CF_table_V_tmp,\n CF_table_W_tmp,\n compounds_dict_inx[u_ind],\n compound_divergence)\n\n divergence_scores_C[u_ind] = DC_VW\n else:\n divergence_scores_C[u_ind] = 0 # here we set compound divergence to zero for filtered items\n\n # At each iteration, the element u is selected such that\n # DC (V ∥W ) and DA (V ∥W ) are kept as closely as possible to the desired values.\n best_u_ind = max(divergence_scores_C, key=divergence_scores_C.get)\n u = U[best_u_ind]\n\n if divergence_scores_C[best_u_ind] <= 0.60:\n quit += 1\n\n if quit == 2:\n print(\"Terminating now...\")\n break\n\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V, CF_table_W, inx_U = add_to_split(S, V, W,\n V_idx, W_idx,\n AF_table_V,\n AF_table_W,\n CF_table_V,\n CF_table_W,\n atoms_dict_inx[best_u_ind],\n compounds_dict_inx[best_u_ind],\n u,\n best_u_ind,\n inx_U)\n\n print(\"%s : %s - Adding %s \\t to %s \\t with A: %.4f \\t with C: %.4f \\\n \\t Remaining: %s\" % (i, len(filtered_inx_U_by_A), u, S,\n divergence_scores_A[best_u_ind],\n divergence_scores_C[best_u_ind],\n # DA_VW,\n # DC_VW,\n len(inx_U)))\n\n\n\n if i % 50 == 0:\n if S == \"V\":\n rnd_u_ind = random.choice(V_idx)\n u = U[rnd_u_ind]\n\n else: # W:\n rnd_u_ind = 
random.choice(W_idx)\n u = U[rnd_u_ind]\n\n V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V,CF_table_W, inx_U = remove_from_split(S, V, W, V_idx, W_idx, AF_table_V, AF_table_W, CF_table_V,CF_table_W, atoms_dict_inx[rnd_u_ind], compounds_dict_inx[rnd_u_ind], u, rnd_u_ind, inx_U)\n print(\"%s Removing %s \\t from %s \\t with indice: %s \\t Remaining: %s\" % (i, u, S, rnd_u_ind,len(inx_U)))\n\n # Plot distribution of train and test splits while addding the new data\n # if i % 50 == 0:\n \n # plt.figure(figsize=(22,6))\n # plt.grid(alpha=0.1, linestyle='--', linewidth=1)\n # plt.hist([bins_A,bins_A], bins=bins_A, weights=[FA_V,FA_W], color=['blue', 'red'], alpha=0.5,\n # label=['FA_V', 'FA_W'])\n # plt.legend(loc='upper right')\n # plt.show()\n \n # plt.figure(figsize=(22,6))\n # plt.grid(alpha=0.1, linestyle='--', linewidth=1)\n # plt.hist([bins_C,bins_C], bins=bins_C, weights=[FC_V,FC_W], color=['green', 'orange'], alpha=0.5,\n # label=['FC_V', 'FC_W'])\n # plt.legend(loc='upper right')\n # plt.show()\n\n # TODO: Based on Atomic divergence < 0.02\n # To reduce the risk of being stuck in a local optimum, we also allow removing examples at certain iterations.\n\n if thr > 0 and thr == i:\n break\n\n if i % 100 == 0 and i != 0:\n\n U_items = list(U_ALL.items())\n\n V_items = [U_items[i][0] for i in V_idx]\n W_items = [U_items[i][0] for i in W_idx]\n\n train_idx = list(V_idx)\n val_idx, test_idx = train_test_split(W_idx, test_size=0.50, random_state=42)\n\n train_keys = list(V_items)\n val_keys = [W_items[W_idx.index(item)] for item in val_idx]\n test_keys = [W_items[W_idx.index(item)] for item in test_idx]\n\n X_train = []\n Y_train = []\n X_test = []\n Y_test = []\n X_val = []\n Y_val = []\n\n for z in train_idx:\n X_train.append(source[z])\n Y_train.append(target[z])\n\n for z in val_idx:\n X_val.append(source[z])\n Y_val.append(target[z])\n\n for z in test_idx:\n X_test.append(source[z])\n Y_test.append(target[z])\n\n save_json(train_keys, \"train_keys.json\")\n save_json(val_keys, \"val_keys.json\")\n save_json(test_keys, \"test_keys.json\")\n\n save_json(train_idx, \"train_idx.json\")\n save_json(val_idx, \"val_idx.json\")\n save_json(test_idx, \"test_idx.json\")\n\n with open('train.src', 'w') as f:\n for item in X_train:\n f.write(\"%s\\n\" % item)\n\n with open('val.src', 'w') as f:\n for item in X_val:\n f.write(\"%s\\n\" % item)\n\n with open('test.src', 'w') as f:\n for item in X_test:\n f.write(\"%s\\n\" % item)\n\n with open('train.trg', 'w') as f:\n for item in Y_train:\n f.write(\"%s\\n\" % item)\n\n with open('val.trg', 'w') as f:\n for item in Y_val:\n f.write(\"%s\\n\" % item)\n\n with open('test.trg', 'w') as f:\n for item in Y_test:\n f.write(\"%s\\n\" % item)\n\n freqs = {\"FA_V\":list(AF_table_V.values()), \"FA_W\":list(AF_table_W.values()),\n \"FC_V\":list(CF_table_V.values()), \"FC_W\":list(CF_table_W.values())}\n\n with open('frequencies.pkl', 'wb') as handle:\n pickle.dump(freqs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n if i % 500 == 0 and i != 0:\n\n os.mkdir(str(i))\n\n #V_items = [U[z][0] for z in V_idx]\n #W_items = [U[z][0] for z in W_idx]\n\n U_items = list(U_ALL.items())\n\n V_items = [U_items[i][0] for i in V_idx]\n W_items = [U_items[i][0] for i in W_idx]\n\n train_idx = list(V_idx)\n val_idx, test_idx = train_test_split(W_idx, test_size=0.50, random_state=42)\n\n train_keys = list(V_items)\n val_keys = [W_items[W_idx.index(item)] for item in val_idx]\n test_keys = [W_items[W_idx.index(item)] for item in test_idx]\n\n X_train = []\n Y_train = []\n 
X_test = []\n Y_test = []\n X_val = []\n Y_val = []\n\n for z in train_idx:\n X_train.append(source[z])\n Y_train.append(target[z])\n\n for z in val_idx:\n X_val.append(source[z])\n Y_val.append(target[z])\n\n for z in test_idx:\n X_test.append(source[z])\n Y_test.append(target[z])\n\n save_json(train_keys,os.path.join(str(i), \"train_keys.json\"))\n save_json(val_keys,os.path.join(str(i), \"val_keys.json\"))\n save_json(test_keys, os.path.join(str(i), \"test_keys.json\"))\n\n save_json(train_idx, os.path.join(str(i), \"train_idx.json\"))\n save_json(val_idx, os.path.join(str(i), \"val_idx.json\"))\n save_json(test_idx, os.path.join(str(i), \"test_idx.json\"))\n\n with open(os.path.join(str(i), 'train.src'), 'w') as f:\n for item in X_train:\n f.write(\"%s\\n\" % item)\n\n with open(os.path.join(str(i), 'val.src'), 'w') as f:\n for item in X_val:\n f.write(\"%s\\n\" % item)\n\n with open(os.path.join(str(i), 'test.src'), 'w') as f:\n for item in X_test:\n f.write(\"%s\\n\" % item)\n\n with open(os.path.join(str(i), 'train.trg'), 'w') as f:\n for item in Y_train:\n f.write(\"%s\\n\" % item)\n\n with open(os.path.join(str(i), 'val.trg'), 'w') as f:\n for item in Y_val:\n f.write(\"%s\\n\" % item)\n\n with open(os.path.join(str(i), 'test.trg'), 'w') as f:\n for item in Y_test:\n f.write(\"%s\\n\" % item)\n\n freqs = {\"FA_V\": list(AF_table_V.values()), \"FA_W\": list(AF_table_W.values()),\n \"FC_V\": list(CF_table_V.values()), \"FC_W\": list(CF_table_W.values())}\n\n with open(os.path.join(str(i),'frequencies.pkl'), 'wb') as handle:\n pickle.dump(freqs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n i = i + 1\n\n return V, V_idx, W, W_idx, list(AF_table_V.values()), list(AF_table_W.values()), list(CF_table_V.values()), list(CF_table_W.values())\n\n V, V_idx, W, W_idx, FA_V, FA_W, FC_V, FC_W = greedy_fast(thr)\n\n U_items = list(U_ALL.items())\n\n V_items = [U_items[i][0] for i in V_idx]\n W_items = [U_items[i][0] for i in W_idx]\n\n \"\"\"### Now generate the split based on the DBCA approach\"\"\"\n\n train_idx = list(V_idx)\n val_idx, test_idx = train_test_split(W_idx, test_size=0.50, random_state=42)\n\n train_keys = list(V_items)\n val_keys = [W_items[W_idx.index(item)] for item in val_idx]\n test_keys = [W_items[W_idx.index(item)] for item in test_idx]\n\n X_train = []\n Y_train = []\n X_test = []\n Y_test = []\n X_val = []\n Y_val = []\n\n for i in train_idx:\n X_train.append(source[i])\n Y_train.append(target[i])\n\n for i in val_idx:\n X_val.append(source[i])\n Y_val.append(target[i])\n\n for i in test_idx:\n X_test.append(source[i])\n Y_test.append(target[i])\n\n save_json(train_keys, \"train_keys.json\")\n save_json(val_keys, \"val_keys.json\")\n save_json(test_keys, \"test_keys.json\")\n\n save_json(train_idx, \"train_idx.json\")\n save_json(val_idx, \"val_idx.json\")\n save_json(test_idx, \"test_idx.json\")\n\n with open('train.src', 'w') as f:\n for item in X_train:\n f.write(\"%s\\n\" % item)\n\n with open('val.src', 'w') as f:\n for item in X_val:\n f.write(\"%s\\n\" % item)\n\n with open('test.src', 'w') as f:\n for item in X_test:\n f.write(\"%s\\n\" % item)\n\n with open('train.trg', 'w') as f:\n for item in Y_train:\n f.write(\"%s\\n\" % item)\n\n with open('val.trg', 'w') as f:\n for item in Y_val:\n f.write(\"%s\\n\" % item)\n\n with open('test.trg', 'w') as f:\n for item in Y_test:\n f.write(\"%s\\n\" % item)\n\n freqs = {\"FA_V\": FA_V, \"FA_W\": FA_W,\n \"FC_V\": FC_V, \"FC_W\": FC_W}\n\n with open('frequencies.pkl', 'wb') as handle:\n pickle.dump(freqs, handle, 
protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == '__main__':\n print(\"starting split generation...\\n\\n\")\n generate(0)\n print(\"completed!\")\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
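The `chernoff_fast` helper in the record above computes the Chernoff coefficient Cα(P∥Q) = sum(p^α q^(1−α)) over two count histograms, and the greedy split search uses the divergence D = 1 − Cα with α = 0.5 for atoms and α = 0.1 for compounds (cf. Keysers et al., 2020). A self-contained sketch of the same computation on raw counts; `chernoff_divergence` is an illustrative name:

def chernoff_divergence(hist1, hist2, alpha):
    # Normalize raw counts to distributions, then compute
    # D = 1 - sum(p^alpha * q^(1 - alpha)).
    eps = 1e-32                      # guards against division by zero
    t1 = sum(hist1) + eps
    t2 = sum(hist2) + eps
    coef = sum((a / t1) ** alpha * (b / t2) ** (1 - alpha)
               for a, b in zip(hist1, hist2))
    return 1.0 - coef

identical = chernoff_divergence([4, 4, 2], [8, 8, 4], 0.5)  # same distribution -> ~0.0
disjoint = chernoff_divergence([1, 0], [0, 1], 0.5)         # no overlap -> ~1.0
assert identical < 1e-6 and disjoint > 0.999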
simona-mircheva/deep-learning-udacity | [
"d697fedd6829b715faf742be4d985fd4c2d9c664"
] | [
"finetune-a-cnn-2/finetune_a_cnn_solution.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time # for measuring time for testing, remove for students\n\ndef test(model, test_loader, criterion, device):\n print(\"Testing Model on Whole Testing Dataset\")\n model.eval()\n running_loss=0\n running_corrects=0\n \n for inputs, labels in test_loader:\n inputs=inputs.to(device)\n labels=labels.to(device)\n outputs=model(inputs)\n loss=criterion(outputs, labels)\n _, preds = torch.max(outputs, 1)\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data).item()\n\n total_loss = running_loss / len(test_loader.dataset)\n total_acc = running_corrects/ len(test_loader.dataset)\n print(f\"Testing Accuracy: {100*total_acc}, Testing Loss: {total_loss}\")\n \ndef train(model, train_loader, validation_loader, criterion, optimizer, device):\n epochs=2\n best_loss=1e6\n image_dataset={'train':train_loader, 'valid':validation_loader}\n loss_counter=0\n \n for epoch in range(epochs):\n for phase in ['train', 'valid']:\n print(f\"Epoch {epoch}, Phase {phase}\")\n if phase=='train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n running_samples=0\n\n for step, (inputs, labels) in enumerate(image_dataset[phase]):\n inputs=inputs.to(device)\n labels=labels.to(device)\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n if phase=='train':\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n _, preds = torch.max(outputs, 1)\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data).item()\n running_samples+=len(inputs)\n if running_samples % 2000 == 0:\n accuracy = running_corrects/running_samples\n print(\"Images [{}/{} ({:.0f}%)] Loss: {:.2f} Accuracy: {}/{} ({:.2f}%)\".format(\n running_samples,\n len(image_dataset[phase].dataset),\n 100.0 * (running_samples / len(image_dataset[phase].dataset)),\n loss.item(),\n running_corrects,\n running_samples,\n 100.0*accuracy,\n )\n )\n \n #NOTE: Comment lines below to train and test on whole dataset\n if running_samples>(0.2*len(image_dataset[phase].dataset)):\n break\n\n epoch_loss = running_loss / running_samples\n epoch_acc = running_corrects / running_samples\n \n if phase=='valid':\n if epoch_loss<best_loss:\n best_loss=epoch_loss\n else:\n loss_counter+=1\n\n if loss_counter==1:\n break\n return model\n\ndef create_model():\n model = models.resnet18(pretrained=True)\n\n for param in model.parameters():\n param.requires_grad = False \n\n num_features=model.fc.in_features\n model.fc = nn.Sequential(\n nn.Linear(num_features, 10))\n return model\n\nbatch_size=10\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Running on Device {device}\")\n\ntraining_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\ntesting_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=training_transform)\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True)\n\ntestset = torchvision.datasets.CIFAR10(root='./data', 
train=False,\n download=True, transform=testing_transform)\n\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False)\n\nmodel=create_model()\nmodel=model.to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.fc.parameters(), lr=0.001)\n\ntrain(model, trainloader, testloader, criterion, optimizer, device)\n\ntest(model, testloader, criterion, device)"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.nn.Linear",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
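The training loop above tracks the best validation loss and stops once it fails to improve (`loss_counter`). A minimal sketch of that patience-based early stopping, isolated from the PyTorch loop; `EarlyStopping` and `stopper` are illustrative names, not part of the file above:

class EarlyStopping:
    # Stop after `patience` consecutive validation epochs without improvement.
    def __init__(self, patience=1):
        self.patience = patience
        self.best = float("inf")
        self.bad_epochs = 0

    def step(self, val_loss):
        # Returns True when training should stop.
        if val_loss < self.best:
            self.best = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience

stopper = EarlyStopping(patience=1)
for epoch_loss in [0.9, 0.7, 0.8]:  # toy validation losses
    if stopper.step(epoch_loss):
        break                       # 0.8 is worse than 0.7, so training stops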
batizty/pyfm | [
"80484adbf6e990e2f2981776e66f010254151cf9"
] | [
"pyfm/example/ml-1m.py"
] | [
"#!/usr/bin/env python2.7\n# -*- coding:utf-8 -*-\n\n__author__ = 'tuoyu'\n__desc__ = \"\"\"This file is to gererate libfm format for later trainning\"\"\"\n\nimport logging\nimport sys\nimport numpy as np\n\nfrom ..reader.pandas_reader import PandasReader\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger(__file__)\n\ndef creat_libfm_data():\n\t'''\n\tCreate libfm format data for later fm trainning\n\t'''\n\n\t# Common Constant Var\n\tdelimiter = '::'\n\t\n\t# Load User Data\n\tuser_schema = ['UserID', 'Genre', 'Age', 'Occupation', 'ZipCode']\n\tuser_data = PandasReader(delimiter=delimiter, schema=user_schema)\n\tuser_data.read('./data/ml-1m/users.dat')\n\tlogger.info(\"Loading user data OK\")\n\n\t# Load Moive Data\n\tmovie_schma = ['MovieID', 'Name', 'Genre_list']\n\tmovie_data = PandasReader(delimiter=delimiter, schema=movie_schma)\n\tmovie_data.read('./data/ml-1m/movies.dat')\n\tmovie_data.drop(['Name'])\n\tlogger.info(\"Loading movie data OK\")\n\n\t# Rating Data\n\trating_schma = ['UserID', 'MovieID', 'Ratings', 'Timestamp']\n\trating_data = PandasReader(delimiter=delimiter, schema=rating_schma)\n\trating_data.read('./data/ml-1m/ratings.dat')\n\tratings = rating_data.data()['Ratings']\n\trating_data.drop(['Ratings', 'Timestamp'])\n\tlogger.info(\"Loading rating data OK\")\n\n\tfeat = [\n\t\t('UserID', rating_data.data()),\n\t\t('MovieID', rating_data.data())\n\t\t# ('Genre', user_data.data()),\n\t\t# ('Age', user_data.data()),\n\t\t# ('Occupation', user_data.data())\n\t]\n\n\t# Offset array\n\toffset_array = [0]\n\tdict_array = []\n\tfor (feature_name, dataset) in feat:\n\t\tuniq = np.unique(dataset[feature_name])\n\t\t# according field different values to setting offset_array\n\t\toffset_array.append(len(uniq) + offset_array[-1])\n\t\t# setting revert search table for keys\n\t\tdict_array.append({ key : value + offset_array[-2] for value, key in enumerate(uniq) })\n\tlogger.info(\"Mapping data done\")\n\n\toutput_filename = 'libfm.dat'\n\twith open(output_filename, 'w') as f:\n\t\t# read ratings data\n\t\tfor i in range(rating_data.data().shape[0]):\n\t\t\t# just pick up ratings data as labels\n\t\t\ts = \"{0}\".format(ratings[i])\n\t\t\tsample = rating_data.data().loc[i]\n\n\t\t\t# add features data, using one-hot encoding\n\t\t\tfor index_feat, (feature_name, dataset) in enumerate(feat):\n\t\t\t\tfvalue = sample.get(feature_name)\n\t\t\t\tif fvalue:\n\t\t\t\t\tkey = dict_array[index_feat][fvalue]\n\t\t\t\t\tlogger.debug(\"feature_name : {} raw : {} value : {}\".format(feature_name, fvalue, key))\n\t\t\t\t\ts += \" {0}:1\".format( key )\n\t\t\ts += '\\n'\n\t\t\tf.write(s)\n\t\tlogger.info(\"Generate samples done\")\n\nif __name__ == '__main__':\n\tcreat_libfm_data()\n\n\n\n\n\t\n\n\n\t\t"
] | [
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
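The script above one-hot encodes each categorical field into a single libFM index space by offsetting every field by the number of distinct values in the fields before it. A small sketch of that offset scheme; `build_index` is an illustrative name:

import numpy as np

def build_index(columns):
    # columns: list of 1-D arrays, one per categorical field.
    # Returns one dict per field mapping raw value -> global feature index.
    offset, mappings = 0, []
    for col in columns:
        uniq = np.unique(col)                        # sorted distinct values
        mappings.append({v: i + offset for i, v in enumerate(uniq)})
        offset += len(uniq)
    return mappings

users = np.array([1, 2, 2, 3])
movies = np.array([10, 10, 20, 30])
maps = build_index([users, movies])
# libFM line for (user=2, movie=20, rating=4.0): label then index:value pairs
line = "4.0 {}:1 {}:1".format(maps[0][2], maps[1][20])
assert line == "4.0 1:1 4:1"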
saraivaufc/Mask_RCNN | [
"a440e957c47598727bcc5e9f550f083e739e1d14"
] | [
"mrcnn/model.py"
] | [
"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport datetime\nimport re\nimport math\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras as keras\nimport tensorflow.keras.backend as K\nimport tensorflow.keras.layers as KL\nimport tensorflow.keras.utils as KU\nfrom tensorflow.python.eager import context\nimport tensorflow.keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 2.0+\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"2.0\")\n\ntf.compat.v1.disable_eager_execution()\n\n############################################################\n# Utility Functions\n############################################################\n\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KL.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def get_config(self):\n config = super(ProposalLayer, self).get_config()\n config[\"config\"] = self.config.to_dict()\n config[\"proposal_count\"] = self.proposal_count\n config[\"nms_threshold\"] = self.nms_threshold\n return config\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(input=anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(input=proposals)[0], 0)\n proposals = tf.pad(tensor=proposals, paddings=[(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n\n if not context.executing_eagerly():\n # Infer the static output shape:\n out_shape = self.compute_output_shape(None)\n proposals.set_shape(out_shape)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return None, self.proposal_count, 4\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. 
TF doesn't have a native implementation.\"\"\"\n return tf.math.log(x) / tf.math.log(2.0)\n\n\nclass PyramidROIAlign(KL.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def get_config(self):\n config = super(PyramidROIAlign, self).get_config()\n config['pool_shape'] = self.pool_shape\n return config\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.compat.v1.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(input=box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n input=box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(input=boxes)[:2], tf.shape(input=pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(input=boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(input=boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(input=boxes1)[0], tf.shape(input=boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(input=proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(tensor=gt_class_ids, mask=non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.compat.v1.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.compat.v1.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.compat.v1.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(input_tensor=crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(input_tensor=overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.compat.v1.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.compat.v1.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random.shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(input=positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random.shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n pred=tf.greater(tf.shape(input=positive_overlaps)[1], 0),\n true_fn=lambda: tf.argmax(input=positive_overlaps, axis=1),\n false_fn=lambda: tf.cast(tf.constant([]), tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(a=gt_masks, perm=[2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(input=roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(input=negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(input=rois)[0], 0)\n rois = tf.pad(tensor=rois, paddings=[(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(tensor=roi_gt_boxes, paddings=[(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(tensor=roi_gt_class_ids, paddings=[(0, N + P)])\n deltas = tf.pad(tensor=deltas, paddings=[(0, N + P), (0, 0)])\n masks = tf.pad(tensor=masks, paddings=[[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KL.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def get_config(self):\n config = super(DetectionTargetLayer, self).get_config()\n config[\"config\"] = self.config.to_dict()\n return config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals, filter overlaps, and return the final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that excludes the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(input=probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.compat.v1.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.compat.v1.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse.to_dense(keep)[0]\n\n
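 # NMS below runs independently per class, so boxes of different\n # classes never suppress each other: overlapping detections survive\n # as long as they carry different class IDs.\n # Apply per-class NMS\n # 1. 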
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.compat.v1.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=class_keep)[0]\n class_keep = tf.pad(tensor=class_keep, paddings=[(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.compat.v1.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse.to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(input=class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.dtypes.cast(tf.gather(class_ids, keep), tf.float32)[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(input=detections)[0]\n detections = tf.pad(tensor=detections, paddings=[(0, gap), (0, 0)], mode=\"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KL.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def get_config(self):\n config = super(DetectionLayer, self).get_config()\n config[\"config\"] = self.config.to_dict()\n return config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * 4]\n # where the last dimension holds (dy, dx, log(dh), log(dw))\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(input=t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\n
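# For scale: with a square 1024x1024 input, the default backbone strides\n# [4, 8, 16, 32, 64] and 3 anchor ratios, the RPN scores\n# 3 * (256**2 + 128**2 + 64**2 + 32**2 + 16**2) = 261888 anchors per image\n# (assuming RPN_ANCHOR_STRIDE = 1).\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 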
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n if s[1] is None:\n mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name=\"mrcnn_bbox\")(x)\n else:\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, 
y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.compat.v1.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unused bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.compat.v1.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n loss = K.switch(tf.size(input=loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(input=pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Compute the loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(input_tensor=loss) / tf.reduce_sum(input_tensor=pred_active)\n return loss\n\n\n
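# smooth_l1_loss (defined above) is quadratic below a diff of 1 and linear\n# above it. A minimal NumPy reference, kept here only as an illustrative\n# sketch (it is not used by the model):\ndef _smooth_l1_reference_example():\n diff = np.abs(np.array([0.5, 2.0]))\n expected = np.where(diff < 1.0, 0.5 * diff**2, diff - 0.5)\n # 0.5 -> 0.125 (quadratic region), 2.0 -> 1.5 (linear region)\n assert np.allclose(expected, [0.125, 1.5])\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. 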
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(input=target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(input=target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(input=pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(a=pred_masks, perm=[0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.compat.v1.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(input=y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augmentation=None):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. 
The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support bool masks\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool\n mask = mask.astype(bool)\n\n # Some boxes might be all zeros if the corresponding mask got cropped out,\n # so filter them out here.\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]\n # Bounding boxes: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if config.USE_MINI_MASK:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\n
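# Typical debugging usage is a sketch along these lines (\"dataset\" is any\n# prepared Dataset and \"config\" its matching Config):\n#\n# from mrcnn import visualize\n# image, image_meta, class_ids, bbox, mask = load_image_gt(\n# dataset, config, dataset.image_ids[0])\n# visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names)\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. 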
Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\n (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\n (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. 
This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. [y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\n np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini mask in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\n
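# The (dy, dx, log(dh), log(dw)) encoding used for the bbox targets above\n# is easy to verify by hand. An illustrative sketch (not used by the model):\n# shifting a 10x10 box to a 20x20 GT offset by 10 gives unit center deltas\n# and log(2) scale deltas.\ndef _box_refinement_example():\n box = np.array([0.0, 0.0, 10.0, 10.0]) # (y1, x1, y2, x2)\n gt = np.array([5.0, 5.0, 25.0, 25.0])\n h, w = box[2] - box[0], box[3] - box[1]\n gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]\n dy = ((gt[0] + 0.5 * gt_h) - (box[0] + 0.5 * h)) / h\n dx = ((gt[1] + 0.5 * gt_w) - (box[1] + 0.5 * w)) / w\n assert np.allclose([dy, dx, np.log(gt_h / h), np.log(gt_w / w)],\n [1.0, 1.0, np.log(2.0), np.log(2.0)])\n\n\n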
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # No anchors intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\n
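# Step 2 above guarantees every GT box is matched to at least one anchor.\n# A tiny illustrative check (not used by the model): with one GT column,\n# argwhere(overlaps == column max) returns every row that ties for the max.\ndef _gt_matching_example():\n overlaps = np.array([[0.1], [0.4], [0.4]])\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]\n assert list(gt_iou_argmax) == [1, 2] # both tied anchors are matched\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal 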
network\n would generate.\n\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into x1, y1, x2, y2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\nclass DataGenerator(KU.Sequence):\n \"\"\"An iterable that returns images and corresponding target class ids,\n bounding box deltas, and masks. It inherits from keras.utils.Sequence to avoid data redundancy\n when multiprocessing=True.\n\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). 
Typically for debugging or visualizations because\n in training, detection targets are generated by DetectionTargetLayer.\n\n Returns a Python iterable. Upon calling __getitem__() on it, the\n iterable returns two lists, inputs and outputs. The contents\n of the lists differ depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n\n def __init__(self, dataset, config, shuffle=True, augmentation=None,\n random_rois=0, detection_targets=False):\n\n self.image_ids = np.copy(dataset.image_ids)\n self.dataset = dataset\n self.config = config\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n self.backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n self.backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n self.shuffle = shuffle\n self.augmentation = augmentation\n self.random_rois = random_rois\n self.batch_size = self.config.BATCH_SIZE\n self.detection_targets = detection_targets\n\n def __len__(self):\n return int(np.ceil(len(self.image_ids) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n b = 0\n image_index = -1\n while b < self.batch_size:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(self.image_ids)\n\n if self.shuffle and image_index == 0:\n np.random.shuffle(self.image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = self.image_ids[image_index]\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(self.dataset, self.config, image_id,\n augmentation=self.augmentation)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,\n gt_class_ids, gt_boxes, self.config)\n\n # Mask R-CNN Targets\n if self.random_rois:\n rpn_rois = generate_random_rois(\n image.shape, self.random_rois, gt_class_ids, gt_boxes)\n if self.detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (self.batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [self.batch_size, self.anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [self.batch_size, self.config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (self.batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (self.batch_size, self.config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (self.batch_size, gt_masks.shape[0], gt_masks.shape[1],\n self.config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if self.random_rois:\n batch_rpn_rois = np.zeros(\n (self.batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if self.detection_targets:\n batch_rois = np.zeros(\n (self.batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (self.batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (self.batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (self.batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), self.config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), self.config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if self.random_rois:\n batch_rpn_rois[b] = rpn_rois\n if self.detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if self.random_rois:\n inputs.extend([batch_rpn_rois])\n if self.detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n return inputs, outputs\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN(object):\n \"\"\"Encapsulates 
the Mask RCNN model functionality.\n\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be divisible by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be divisible by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling. \"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n
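 # The six factors of 2 come from the downsampling chain (C1 through\n # P6): 2**6 = 64, so valid sizes are multiples of 64,\n # e.g. 320 = 64 * 5 or 1024 = 64 * 16.\n\n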
 # Inputs\n input_image = KL.Input(\n shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the head (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to verify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n # This class returns a constant layer\n class ConstLayer(tf.keras.layers.Layer):\n def __init__(self, x, name=None):\n super(ConstLayer, self).__init__(name=name)\n self.x = tf.Variable(x)\n\n def call(self, input):\n return self.x\n\n anchors = ConstLayer(anchors, name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n from tensorflow.python.keras.saving import hdf5_format\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n with h5py.File(filepath, mode='r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n hdf5_format.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n hdf5_format.load_weights_from_hdf5_group(f, layers)\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(input_tensor=layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(input=w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(input_tensor=layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_metric(loss, name=name, aggregation='mean')\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n # Use string for regex since we might want to use pathlib.Path as model_path\n m = re.match(regex, str(model_path))\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done alreay, so this actually determines\n the epochs to train in total rather than in this particaular\n call.\n layers: Allows selecting wich layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. 
For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n\t custom_callbacks: Optional. Add custom callbacks to be called\n\t with the keras fit_generator method. Must be list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = DataGenerator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation)\n val_generator = DataGenerator(val_dataset, self.config, shuffle=True)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt':\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=False\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n # if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n # inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n # if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n # model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes a image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellenous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(input_tensor=tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(tensor=boxes, mask=non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n"
] | [
[
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.image.non_max_suppression",
"tensorflow.minimum",
"tensorflow.keras.layers.Conv2DTranspose",
"numpy.where",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Add",
"numpy.zeros",
"numpy.log",
"tensorflow.gather_nd",
"numpy.random.choice",
"tensorflow.exp",
"numpy.delete",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.split",
"numpy.array",
"numpy.sum",
"tensorflow.reduce_mean",
"numpy.random.shuffle",
"tensorflow.keras.backend.binary_crossentropy",
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.stack",
"numpy.concatenate",
"tensorflow.map_fn",
"tensorflow.pad",
"tensorflow.keras.layers.ZeroPadding2D",
"numpy.divide",
"tensorflow.boolean_mask",
"numpy.reshape",
"tensorflow.keras.regularizers.l2",
"tensorflow.gather",
"numpy.copy",
"numpy.argmax",
"tensorflow.compat.v1.where",
"tensorflow.unique",
"tensorflow.keras.layers.Dense",
"tensorflow.identity",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.round",
"tensorflow.reduce_max",
"tensorflow.sparse.to_dense",
"tensorflow.multiply",
"tensorflow.expand_dims",
"numpy.ones",
"tensorflow.keras.backend.equal",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.empty",
"tensorflow.keras.layers.Input",
"tensorflow.cast",
"tensorflow.image.crop_and_resize",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.add_n",
"numpy.random.randint",
"numpy.hstack",
"tensorflow.Variable",
"tensorflow.keras.backend.not_equal",
"tensorflow.squeeze",
"tensorflow.stop_gradient",
"tensorflow.keras.backend.reshape",
"tensorflow.argmax",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"tensorflow.shape",
"tensorflow.keras.backend.abs",
"tensorflow.keras.backend.sparse_categorical_crossentropy",
"tensorflow.random.shuffle",
"tensorflow.size",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.math.log",
"numpy.broadcast_to",
"tensorflow.keras.backend.mean",
"tensorflow.sqrt",
"tensorflow.abs",
"tensorflow.logical_and",
"numpy.amax",
"tensorflow.equal",
"tensorflow.keras.backend.less",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group_by_name",
"numpy.max",
"numpy.any",
"tensorflow.keras.layers.Concatenate",
"numpy.arange",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.squeeze",
"tensorflow.divide",
"numpy.stack",
"tensorflow.nn.top_k",
"tensorflow.python.keras.saving.hdf5_format.load_weights_from_hdf5_group",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Activation",
"numpy.abs",
"tensorflow.maximum",
"numpy.sort",
"tensorflow.keras.backend.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
cherreggy/vq-vae-2-pytorch-master | [
"3dc4f814daeeccdc3111f18ec404e01c1f712abc"
] | [
"vqvae.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport distributed as dist_fn\n\n\n# Copyright 2018 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\n# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch\n\n\nclass Quantize(nn.Module):\n def __init__(self, dim, n_embed, decay=0.99, eps=1e-5):\n super().__init__()\n\n self.dim = dim\n self.n_embed = n_embed\n self.decay = decay\n self.eps = eps\n\n embed = torch.randn(dim, n_embed)\n self.register_buffer(\"embed\", embed)\n self.register_buffer(\"cluster_size\", torch.zeros(n_embed))\n self.register_buffer(\"embed_avg\", embed.clone())\n\n def forward(self, input):\n flatten = input.reshape(-1, self.dim)\n dist = (\n flatten.pow(2).sum(1, keepdim=True)\n - 2 * flatten @ self.embed\n + self.embed.pow(2).sum(0, keepdim=True)\n )\n _, embed_ind = (-dist).max(1)\n embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)\n embed_ind = embed_ind.view(*input.shape[:-1])\n quantize = self.embed_code(embed_ind)\n\n if self.training:\n embed_onehot_sum = embed_onehot.sum(0)\n embed_sum = flatten.transpose(0, 1) @ embed_onehot\n\n dist_fn.all_reduce(embed_onehot_sum)\n dist_fn.all_reduce(embed_sum)\n\n self.cluster_size.data.mul_(self.decay).add_(\n embed_onehot_sum, alpha=1 - self.decay\n )\n self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)\n n = self.cluster_size.sum()\n cluster_size = (\n (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n\n )\n embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)\n self.embed.data.copy_(embed_normalized)\n\n diff = (quantize.detach() - input).pow(2).mean()\n quantize = input + (quantize - input).detach()\n\n return quantize, diff, embed_ind\n\n def embed_code(self, embed_id):\n return F.embedding(embed_id, self.embed.transpose(0, 1))\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channel, channel):\n super().__init__()\n\n self.conv = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(in_channel, channel, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, in_channel, 1),\n )\n\n def forward(self, input):\n out = self.conv(input)\n out += input\n\n return out\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride):\n super().__init__()\n\n if stride == 4:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel, channel, 3, padding=1),\n ]\n\n elif stride == 2:\n blocks = [\n nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // 2, channel, 3, padding=1),\n ]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n self.blocks = 
nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass Decoder(nn.Module):\n def __init__(\n self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride\n ):\n super().__init__()\n\n blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]\n\n for i in range(n_res_block):\n blocks.append(ResBlock(channel, n_res_channel))\n\n blocks.append(nn.ReLU(inplace=True))\n\n if stride == 4:\n blocks.extend(\n [\n nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(\n channel // 2, out_channel, 4, stride=2, padding=1\n ),\n ]\n )\n\n elif stride == 2:\n blocks.append(\n nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)\n )\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, input):\n return self.blocks(input)\n\n\nclass VQVAE(nn.Module):\n def __init__(\n self,\n in_channel=3,\n channel=128,\n n_res_block=2,\n n_res_channel=32,\n embed_dim=64,\n n_embed=512,\n decay=0.99,\n ):\n super().__init__()\n\n self.enc_b = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=4)\n self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)\n self.quantize_conv_t = nn.Conv2d(channel, embed_dim, 1)\n self.quantize_t = Quantize(embed_dim, n_embed)\n self.dec_t = Decoder(\n embed_dim, embed_dim, channel, n_res_block, n_res_channel, stride=2\n )\n self.quantize_conv_b = nn.Conv2d(embed_dim + channel, embed_dim, 1)\n self.quantize_b = Quantize(embed_dim, n_embed)\n self.upsample_t = nn.ConvTranspose2d(\n embed_dim, embed_dim, 4, stride=2, padding=1\n )\n self.dec = Decoder(\n embed_dim + embed_dim,\n in_channel,\n channel,\n n_res_block,\n n_res_channel,\n stride=4,\n )\n\n def forward(self, input):\n quant_t, quant_b, diff, _, _ = self.encode(input)\n dec = self.decode(quant_t, quant_b)\n\n return dec, diff\n\n def encode(self, input):\n enc_b = self.enc_b(input)\n enc_t = self.enc_t(enc_b)\n\n quant_t = self.quantize_conv_t(enc_t).permute(0, 2, 3, 1)\n quant_t, diff_t, id_t = self.quantize_t(quant_t)\n quant_t = quant_t.permute(0, 3, 1, 2)\n diff_t = diff_t.unsqueeze(0)\n\n dec_t = self.dec_t(quant_t)\n enc_b = torch.cat([dec_t, enc_b], 1)\n\n quant_b = self.quantize_conv_b(enc_b).permute(0, 2, 3, 1)\n quant_b, diff_b, id_b = self.quantize_b(quant_b)\n quant_b = quant_b.permute(0, 3, 1, 2)\n diff_b = diff_b.unsqueeze(0)\n\n return quant_t, quant_b, diff_t + diff_b, id_t, id_b\n\n def decode(self, quant_t, quant_b):\n upsample_t = self.upsample_t(quant_t)\n quant = torch.cat([upsample_t, quant_b], 1)\n dec = self.dec(quant)\n\n return dec\n\n def decode_code(self, code_t, code_b):\n quant_t = self.quantize_t.embed_code(code_t)\n quant_t = quant_t.permute(0, 3, 1, 2)\n quant_b = self.quantize_b.embed_code(code_b)\n quant_b = quant_b.permute(0, 3, 1, 2)\n\n dec = self.decode(quant_t, quant_b)\n\n return dec\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.zeros",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.functional.one_hot",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FarshidNooshi/CI_ANN_Fruit_Detector | [
"e027ab95e744f6421dee1ca9a96cac80ff89f881"
] | [
"Implementation/ANN/section_two/main.py"
] | [
"import time\n\nimport numpy as np\n\nfrom ANN.section_one.credentials import get_path_of_Datasets\nfrom ANN.section_one.utils.utilsV1 import load_data\nfrom ANN.section_two.utils.utilsV2 import L_layer_model\n\npath = get_path_of_Datasets()\ntrain_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig = load_data(path)\n\nx_train = np.zeros((102, 1962))\ny_train = np.zeros((4, 1962))\nfor i in range(1962):\n for j in range(102):\n x_train[j, i] = train_set_x_orig[i][j]\nfor i in range(1962):\n for j in range(4):\n y_train[j, i] = train_set_y_orig[i][j]\n\nx_section_three = x_train[:, 0:200]\ny_section_three = y_train[:, 0:200]\n\nstart_time = time.time()\nparameters = L_layer_model(x_section_three, y_section_three, [102, 150, 60, 4], num_epochs=5, print_cost=True)\nprint(\"\\n--- %s seconds ---\" % (time.time() - start_time))\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mkeguida/Open3D_2019-02-13 | [
"264a9bf79615d7a2143c0afed68cc83cf586e1cb"
] | [
"examples/Python/Basic/rgbd_tum.py"
] | [
"# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/Python/Tutorial/Basic/rgbd_tum.py\n\nfrom open3d import *\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n print(\"Read TUM dataset\")\n color_raw = read_image(\"../../TestData/RGBD/other_formats/TUM_color.png\")\n depth_raw = read_image(\"../../TestData/RGBD/other_formats/TUM_depth.png\")\n rgbd_image = create_rgbd_image_from_tum_format(color_raw, depth_raw);\n print(rgbd_image)\n plt.subplot(1, 2, 1)\n plt.title('TUM grayscale image')\n plt.imshow(rgbd_image.color)\n plt.subplot(1, 2, 2)\n plt.title('TUM depth image')\n plt.imshow(rgbd_image.depth)\n plt.show()\n pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic(\n PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n # Flip it, otherwise the pointcloud will be upside down\n pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n draw_geometries([pcd])\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |