Dataset schema (one record per repository snapshot; the per-file columns are parallel sequences of equal length):
repo_name          string (6–130 characters)
hexsha             sequence of commit SHAs, one per file
file_path          sequence of file paths
code               sequence of file contents
apis               sequence of API-call lists extracted from each file
possible_versions  list of per-file mappings from library name to candidate versions
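The records below all follow this schema. As a rough illustration of how such records might be consumed, here is a minimal Python sketch that flattens each repository record into per-file entries. It is an assumption-laden sketch, not the dataset's official loader: it assumes the records are stored as JSON Lines (one JSON object per repository), and the file name `api_versions.jsonl` is a placeholder, not part of the dataset.

```python
# Minimal sketch, assuming JSON Lines storage; "api_versions.jsonl" is a
# placeholder file name, not the dataset's actual name.
import json


def iter_files(path="api_versions.jsonl"):
    """Yield one dict per source file, flattening the parallel sequences."""
    with open(path) as fh:
        for line in fh:
            record = json.loads(line)
            # hexsha, file_path, code, apis and possible_versions are
            # parallel lists with one entry per file in the repository.
            for sha, file_path, code, apis, versions in zip(
                record["hexsha"],
                record["file_path"],
                record["code"],
                record["apis"],
                record["possible_versions"],
            ):
                yield {
                    "repo_name": record["repo_name"],
                    "hexsha": sha,
                    "file_path": file_path,
                    "code": code,
                    "apis": apis,
                    "possible_versions": versions,
                }


if __name__ == "__main__":
    # Print the first flattened entry as a quick sanity check.
    for rec in iter_files():
        print(rec["repo_name"], rec["file_path"], rec["apis"][:3])
        break
```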
zdadadaz/jcc_dfdc
[ "672b61771e22b369c7950c89299b0a7a2f7586ad", "672b61771e22b369c7950c89299b0a7a2f7586ad" ]
[ "tmp.py", "playground/tmp1.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 12 19:28:12 2020\n\n@author: zdadadaz\n\"\"\"\n\nimport json\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\n\n# dir_json = './../fb_whole/metadata_21.json'\n# train_list =[]\n# with open(dir_json) as json_file:\n# data = json.load(json_file)\n# train_list = pd.DataFrame.from_dict(data, orient='index')\n# train_list.reset_index(level=0, inplace=True)\n \n# train_list[train_list['label']=='REAL'].iloc[1]\n\nbase = pd.read_csv('submission_base.csv')\nmtcn = pd.read_csv('submission_mtcn.csv')\nwhole = pd.read_csv('metadata_small.csv')\n\nsLength = len(base['label'])\nbase['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['original'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['folder'] = pd.Series(np.random.randn(sLength), index=base.index)\nbase['res'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['wrong'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['original'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['folder'] = pd.Series(np.random.randn(sLength), index=base.index)\nmtcn['res'] = pd.Series(np.random.randn(sLength), index=base.index)\n\nfor i in range(len(base)):\n print(str(i))\n fn = base.iloc[i][0]\n label = whole[whole['filename']==fn]['label']\n score =0\n origin = \"n\"\n folder = whole[whole['filename']==fn]['folder']\n if list(label)[0] ==\"FAKE\":\n score = 1\n origin = whole[whole['filename']==fn]['original']\n \n base['wrong'][i]= abs(score - base.iloc[i][1])>0.5\n base['original'][i]= list(origin)[0]\n base['folder'][i]= list(folder)[0]\n base['res'][i]= list(label)[0]\n \n mtcn['wrong'][i]= abs(score - mtcn.iloc[i][1])>0.5\n mtcn['original'][i]= list(origin)[0]\n mtcn['folder'][i]= list(folder)[0]\n mtcn['res'][i]= list(label)[0]\n \nfor i, d in base.groupby('res'):\n base['label'].plot(kind='hist', figsize=(15, 5), bins=20, alpha=0.8, title='base')\n plt.legend(['FAKE','REAL'])\nplt.show()\nfor i, d in base.groupby('res'):\n mtcn['label'].plot(kind='hist', figsize=(15, 5), bins=20, title='MTCNN', alpha=0.8)\n plt.legend(['FAKE','REAL'])\nplt.show()\n\nTP = sum(np.array(base['label']>0.5) & np.array(base['res']==\"FAKE\"))\nFP = sum(np.array(base['label']>0.5) & np.array(base['res']==\"REAL\"))\nTN = sum(np.array(base['label']<=0.5) & np.array(base['res']==\"FAKE\"))\nFN = sum(np.array(base['label']<=0.5) & np.array(base['res']==\"REAL\"))\nprecision = TP/len(base)*2\nrecall = TP/(TP+FP)\nFake_precision = TP/(TP+TN)\nReal_precision = FN/(FP+FN)", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 17 18:09:13 2020\n\n@author: zdadadaz\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras.datasets import imdb\nfrom keras import backend as K\nimport numpy as np\n# Embedding\nmax_features = 20000\nmaxlen = 100\nembedding_size = 128\n\n# Convolution\nkernel_size = 5\nfilters = 64\npool_size = 4\n\n# LSTM\nlstm_output_size = 70\n\n# Training\nbatch_size = 30\nepochs = 2\n\n'''\nNote:\nbatch_size is highly sensitive.\nOnly 2 epochs are needed as the dataset is very small.\n'''\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(x_train), 'train 
sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)\n\nprint('Build model...')\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, embedding_size, input_length=maxlen))\nmodel.add(Dropout(0.25))\nmodel.add(Conv1D(filters,\n kernel_size,\n padding='valid',\n activation='relu',\n strides=1))\nmodel.add(MaxPooling1D(pool_size=pool_size))\n# model.add(LSTM(lstm_output_size))\n# model.add(Dense(1))\n# model.add(Activation('sigmoid'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n\ninput_shape = (1,100)\ninp = model.input # input placeholder\noutputs = [layer.output for layer in model.layers] # all layer outputs\nfunctors = [K.function([inp], [out]) for out in outputs] \n\ntest = np.random.random(input_shape)[np.newaxis,...]\nlayer_outs = [func([test]) for func in functors]\n# print (layer_outs)\nprint(outputs[-1])\n\n# print('Train...')\n# model.fit(x_train, y_train,\n# batch_size=batch_size,\n# epochs=epochs,\n# validation_data=(x_test, y_test))\n# score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)\n# print('Test score:', score)\n# print('Test accuracy:', acc)" ]
[ [ "pandas.read_csv", "matplotlib.pylab.show", "numpy.random.randn", "matplotlib.pylab.legend", "numpy.array" ], [ "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
antonvs88/crowddynamics-research
[ "61260aa26a6d5bc213252bf96eaa472a551918e3" ]
[ "data_analysis/calculate_field_data.py" ]
[ "from scipy.spatial import Voronoi, voronoi_plot_2d\n\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom shapely.geometry import Polygon, MultiLineString, Point\nfrom shapely.ops import polygonize\nfrom descartes import PolygonPatch\nfrom voronoi_finite_polygons_2d import voronoi_finite_polygons_2d\nfrom recursive_mean import recursive_mean\n\n# Bound box representing the room. Used later in making Voronoi tessalation.\nwidth = 20\nheight = 20\nboundbox = Polygon([(0, 0), (0, height), (width, height), (width, 0)])\n\n# Create a grid structure over the room geometry.\n# Cell size in the grid, determines the resolution of the micro-macro converted data\ncell_size = 0.1\nm = np.round(width / cell_size)\nn = np.round(height / cell_size)\nm = m.astype(int)\nn = n.astype(int)\nX = np.linspace(0, width, m + 1)\nY = np.linspace(0, height, n + 1)\nhlines = [((x1, yi), (x2, yi)) for x1, x2 in zip(X[:-1], Y[1:]) for yi in Y]\nvlines = [((xi, y1), (xi, y2)) for y1, y2 in zip(Y[:-1], Y[1:]) for xi in X]\ngrids = list(polygonize(MultiLineString(hlines + vlines)))\n\n# The data is divided into four intervals. The number of pedestrians in the room determines the intervals.\n# The data when the 10 first and 10 last pedestrians leave the room is omitted to get rid of transient\n# behavior of the \"crowd system\".\ninterval1_start = 190\ninterval2_start = 145\ninterval3_start = 100\ninterval4_start = 55\ninterval4_end = 10\n\n# These should be the midpoints of the cells\nmid_x, mid_y = np.meshgrid(np.arange(cell_size/2, width, cell_size), np.arange(cell_size/2, height, cell_size))\n# The vector in each cell, pointing from the midpoint of the cell to the middle of the exit.\n# Used later in calculating the radial speed.\ndirection = np.zeros((mid_x.shape[0],mid_x.shape[0],2))\ndirection[:, :, 0] = mid_x - 20\ndirection[:, :, 1] = mid_y - 10\nd_norm = np.sqrt(direction[:,:,0] * direction[:,:,0] + direction[:,:,1] * direction[:,:,1])\n\n# We will calculate mean values of some part of the data recursively by taking a \"chunk\" of the data.\nchunk = 1000 # chunk size\n\n# The outer loop goes through the folders. 
The data from the simulations should be stored there in .npy.gz format.\nmylist = ['taset0'] # name of the folder, where the data is; can be an array of folders\nfor i in range(0, len(mylist)):\n\n # The inner loop goes through the simulations (in this case it goes through just one simulation)\n for j in range(int(sys.argv[1]), int(sys.argv[1]) + 1):\n\n\t# Data of pedestrians in the room at different times (0=\"not in room\", 1=\"in room\").\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz')):\n in_room = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'in_room1', j, '.npy.gz'))\n\n # Calculate number of pedestrians in room at different times\n sum_in_room = np.sum(in_room, axis=1)\n\n # The time steps when there are 190 pedestrians in the room\n time_interval1_start = np.where(sum_in_room == interval1_start)\n\n # Take the first instant when there are 190 pedestrians in the room.\n #\n # If there are no time steps when there are 190 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously, and thus the number of pedestrians go from 191 to 189), take the times when\n # there are 189 pedestrians in the room.\n if np.size(time_interval1_start) == 0:\n time_interval1_start = np.where(sum_in_room == (interval1_start - 1))[0][0]\n else:\n time_interval1_start = np.where(sum_in_room == interval1_start)[0][0]\n\n # The time steps when there are 145 pedestrians in the room\n time_interval2_start = np.where(sum_in_room == interval2_start)\n\n # Take the first instant when there are 145 pedestrians in the room.\n #\n # If there are no time steps when there are 145 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 146 to 144), take the times when\n # there are 144 pedestrians in the room.\n if np.size(time_interval2_start) == 0:\n time_interval2_start = np.where(sum_in_room == (interval2_start - 1))[0][0]\n else:\n time_interval2_start = np.where(sum_in_room == interval2_start)[0][0]\n\n # The time steps when there are 100 pedestrians in the room\n time_interval3_start = np.where(sum_in_room == interval3_start)\n\n # Take the first instant when there are 100 pedestrians in the room.\n #\n # If there are no time steps when there are 100 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 101 to 99), take the times when\n # there are 99 pedestrians in the room.\n if np.size(time_interval3_start) == 0:\n time_interval3_start = np.where(sum_in_room == (interval3_start - 1))[0][0]\n else:\n time_interval3_start = np.where(sum_in_room == interval3_start)[0][0]\n\n # The time steps when there are 55 pedestrians in the room\n time_interval4_start = np.where(sum_in_room == interval4_start)\n\n # Take the first instant when there are 55 pedestrians in the room.\n #\n # If there is no time steps when there are 55 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 56 to 54), take the times when\n # there are 54 pedestrians in the room.\n if np.size(time_interval4_start) == 0:\n time_interval4_start = np.where(sum_in_room == (interval4_start - 1))[0][0]\n else:\n time_interval4_start = np.where(sum_in_room == interval4_start)[0][0]\n\n # The time steps when there 10 pedestrians in the room\n time_interval4_end = np.where(sum_in_room == interval4_end)\n\n # Take the first instant when there are 10 
pedestrians in the room.\n #\n # If there are no time steps when there are 10 pedestrians in the room (because two pedestrians have\n # evacuated simultaneously and the number of pedestrians go from 11 to 9), take the times when\n # there are 9 pedestrians in the room.\n if np.size(time_interval4_end) == 0:\n time_interval4_end = np.where(sum_in_room == (interval4_end - 1))[0][0]\n else:\n time_interval4_end = np.where(sum_in_room == interval4_end)[0][0]\n\n\t# Data of x-positions of pedestrians at different times.\n # NOTE! The data is sampled at a finer resolution, thus we take only every second element of the array.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz')):\n positions_x = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_x', j, '.npy.gz'))\n positions_x = positions_x[0::2] # take every second element\n\n # Data of y-positions of pedestrians at different times.\n # NOTE! The data is sampled at a finer resolution, thus we take only every second element of the array.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz')):\n positions_y = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'positions_y', j, '.npy.gz'))\n positions_y = positions_y[0::2] # take every second element\n\n # Data of pedestrians' velocities x-component at different times.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz')):\n velocities_x = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_x', j, '.npy.gz'))\n\n # Data of pedestrians' velocities y-component at different times.\n if os.path.exists(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz')):\n velocities_y = np.loadtxt(\"{}{}{}{}{}{}\".format('simulation_data/', mylist[i], '/', 'velocities_y', j, '.npy.gz'))\n\n # Arrays to save the micro-macro converted data\n velocity_x = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # velocity x-component\n velocity_y = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # velocity y-component\n speed = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # speed\n density = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # density\n projection = np.zeros((time_interval4_end - time_interval1_start, n, m), dtype=np.float16) # radial speed\n\n\t# Loop through the data when the number of pedestrians in the room goes from 190 to 10.\n # Using the Voronoi-method derive the macroscopic quantities.\n for t in range(time_interval1_start, time_interval4_end):\n\n # Positions of pedestrians inside the room\n agents_in_room = np.where(in_room[t, :] == 1)[0] # which pedestrians are in the room\n n_agents_in_room = len(agents_in_room) # number of pedestrians in the room\n points = np.concatenate((np.reshape(positions_x[t, agents_in_room], (n_agents_in_room, 1)),\n np.reshape(positions_y[t, agents_in_room], (n_agents_in_room, 1))), axis=1)\n\n # x- and y-components of velocities of pedestrians in room\n x_component = velocities_x[t, agents_in_room]\n y_component = velocities_y[t, agents_in_room]\n\n # Create a Voronoi tessalation from pedestrian center points\n vor = Voronoi(points)\n\n # Add also the Voronoi regions on the rim to the tessalation\n #\n # new_vertices contains all the vertices in the tessalation\n # 
new_regions contains the vertices used for each Voronoi area\n #\n # https://stackoverflow.com/questions/20515554/colorize-voronoi-diagram\n # https://gist.github.com/pv/8036995\n new_regions, new_vertices = voronoi_finite_polygons_2d(vor)\n\n # Loop through the Voronoi tessalations and calculate the density for each cell in the grid \n # (Steffen B, Seyfried A (2010) Methods for measuring pedestrian density, flow, speed and direction\n # with minimal scatter. Physica A: Statistical mechanics and its applications 389(9):1902-1910)\n for r in range(0, len(new_regions)):\n region = new_regions[r]\n # Shapely Polygon object from Voronoi cell\n voronoi_cell = Polygon(shell=new_vertices[region]) & boundbox\n\n # Area of the Voronoi cell\n vor_area = voronoi_cell.area\n\n # Calculate minimal and maximal x- and y-coordinate values of the Voronoi cell\n minx, miny, maxx, maxy = voronoi_cell.bounds\n # Round the minimal and maximal values to belong to a cell in the square grid\n minx, miny, maxx, maxy = np.round(\n (minx / cell_size, miny / cell_size, maxx / cell_size, maxy / cell_size)).astype(int)\n\n\t\t# Make sure that min and max values don't get out of bounds.\n minx = np.maximum(0, minx - 1)\n miny = np.maximum(0, miny - 1)\n maxx = np.minimum(m, maxx + 1)\n maxy = np.minimum(n, maxy + 1)\n\n # Loop over cells in the grid intersecting with the Voronoi cell.\n for x in range(minx, maxx):\n for y in range(miny, maxy):\n intersect_area = grids[x * n + y].intersection(voronoi_cell).area # intersecting area\n # Calculate the contribution of the pedestrian to the density and velocity in the grid cell.\n density[t - time_interval1_start, y, x] += intersect_area / vor_area\n velocity_x[t - time_interval1_start, y, x] += intersect_area * x_component[r]\n velocity_y[t - time_interval1_start, y, x] += intersect_area * y_component[r]\n\n # Finalize calculating the weighted density and velocity in the cell, by dividing it by the cell area\n density[t - time_interval1_start, :, :] /= cell_size * cell_size\n velocity_x[t - time_interval1_start, :, :] /= cell_size * cell_size\n velocity_y[t - time_interval1_start, :, :] /= cell_size * cell_size\n\n # Flip the density matrix upside down because of peculiar indexing in python\n density[t - time_interval1_start, :, :] = np.flipud(density[t - time_interval1_start, :, :])\n velocity_x[t - time_interval1_start, :, :] = np.flipud(velocity_x[t - time_interval1_start, :, :])\n velocity_y[t - time_interval1_start, :, :] = np.flipud(velocity_y[t - time_interval1_start, :, :])\n\n # Calculate speed in cells from the resultant velocity vectors\n speed[t - time_interval1_start, :, :] = np.hypot(velocity_x[t - time_interval1_start, :, :],\n velocity_y[t - time_interval1_start, :, :])\n\n # Radial speed (calculate projections of actualized velocities on desired velocities)\n projection[t - time_interval1_start, :, :] = (velocity_x[t - time_interval1_start, :, :] *\n direction[:, :, 0] + velocity_y[t -\n time_interval1_start, :, :] *\n direction[:, :, 1]) / d_norm\n\n # Save the length of the time intervals\n intervals = np.array((time_interval2_start - time_interval1_start, time_interval3_start - time_interval2_start,\n time_interval4_start - time_interval3_start, time_interval4_end - time_interval4_start))\n np.save(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'intervals', j, '.npy'), intervals)\n\n # Save the macroscopic data of speed, density and radial speed in .hdf5 format for each time interval\n # NOTE: The data is not averaged over time. 
The averaging is done in \"average_fields.py\". If one wants\n # to save space the averaging should be performed already in this code.\n\n # First interval (190...145 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval1', j, '.hdf5'), 'w') as hf1:\n hf1.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval1', j, '.npy.gz'),\n data=speed[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval1', j, '.hdf5')) as hf2:\n hf2.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval1', j, '.npy.gz'),\n data=density[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval1', j, '.hdf5')) as hf3:\n hf3.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval1', j, '.npy.gz'),\n data=projection[time_interval1_start - time_interval1_start:\n time_interval2_start - time_interval1_start, :, :])\n\n # Second interval (145...100 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval2', j, '.hdf5'), 'w') as hf4:\n hf4.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval2', j, '.npy.gz'),\n data=speed[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval2', j, '.hdf5')) as hf5:\n hf5.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval2', j, '.npy.gz'),\n data=density[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval2', j, '.hdf5')) as hf6:\n hf6.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval2', j, '.npy.gz'),\n data=projection[time_interval2_start - time_interval1_start:\n time_interval3_start - time_interval1_start, :, :])\n\n\n # First interval (100...55 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval3', j, '.hdf5'), 'w') as hf7:\n hf7.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval3', j, '.npy.gz'),\n data=speed[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval3', j, '.hdf5')) as hf8:\n hf8.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval3', j, '.npy.gz'),\n data=density[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval3', j, '.hdf5')) as hf9:\n hf9.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval3', j, '.npy.gz'),\n data=projection[time_interval3_start - time_interval1_start:\n time_interval4_start - time_interval1_start, :, :])\n\n # First interval (190...145 agents in the room)\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'speed_interval4', j, '.hdf5'), 'w') as hf10:\n hf10.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 
'speed_interval4', j, '.npy.gz'),\n data=speed[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval4', j, '.hdf5')) as hf11:\n hf11.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'density_interval4', j, '.npy.gz'),\n data=density[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n with h5py.File(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval4', j, '.hdf5')) as hf12:\n hf12.create_dataset(\"{}{}{}{}{}{}\".format('fields/', mylist[i], '/', 'projection_interval4', j, '.npy.gz'),\n data=projection[time_interval4_start - time_interval1_start:\n time_interval4_end - time_interval1_start, :, :])\n\n\n" ]
[ [ "scipy.spatial.Voronoi", "numpy.maximum", "numpy.sqrt", "numpy.linspace", "numpy.minimum", "numpy.reshape", "numpy.arange", "numpy.flipud", "numpy.round", "numpy.size", "numpy.where", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.hypot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doublefloyd/beluga
[ "740bda376634945ef51bf1cf946fcbe002e9bc7f", "740bda376634945ef51bf1cf946fcbe002e9bc7f" ]
[ "beluga/numeric/compilation/component_compilation.py", "examples/Oscillators/Rayleigh/plotresults.py" ]
[ "import numpy as np\nfrom scipy.integrate import simps\n\nfrom beluga.numeric.compilation import jit_lambdify, jit_compile_func\nfrom beluga.symbolic.data_classes.components_structures import CostStruct\n\n\ndef compile_control(control_options, args, ham_func, lambdify_func=jit_lambdify):\n\n num_options = len(control_options)\n\n if num_options == 0:\n return None\n\n elif num_options == 1:\n compiled_option = lambdify_func(args, control_options[0])\n\n def calc_u(_y, _p, _k):\n return np.array(compiled_option(_y, _p, _k))\n\n else:\n compiled_options = lambdify_func(args, control_options)\n\n def calc_u(_y, _p, _k):\n u_set = np.array(compiled_options(_y, _p, _k))\n\n u = u_set[0, :]\n ham = ham_func(_y, u, _p, _k)\n for n in range(1, num_options):\n ham_i = ham_func(_y, u_set[n, :], _p, _k)\n if ham_i < ham:\n u = u_set[n, :]\n ham = ham_i\n\n return u\n\n return jit_compile_func(calc_u, args, func_name='control_function')\n\n\ndef compile_cost(symbolic_cost: CostStruct, dynamic_args, bc_args, lambdify_func=jit_lambdify):\n\n compute_initial_cost = lambdify_func(bc_args, symbolic_cost.initial)\n compute_terminal_cost = lambdify_func(bc_args, symbolic_cost.terminal)\n compute_path_cost = lambdify_func(dynamic_args, symbolic_cost.path)\n\n def compute_cost(_t, _y, _q, _u, _p, _k):\n\n if len(_q) > 0:\n cost = compute_initial_cost(_y[0, :], _q[0, :], _p, _k) \\\n + compute_terminal_cost(_y[-1, :], _q[-1, :], _p, _k)\n else:\n cost = compute_initial_cost(_y[0, :], _q, _p, _k) + compute_terminal_cost(_y[-1, :], _q, _p, _k)\n\n path_cost = np.array([compute_path_cost(yi, ui, _p, _k) for yi, ui in zip(_y, _u)])\n cost += simps(path_cost, _t, even='last')\n\n return cost\n\n return compute_cost\n", "from beluga.utils import load\nimport matplotlib.pyplot as plt\n\ndata = load('data.beluga')\nsol_set = data['solutions']\ntraj = sol_set[-1][-1]\n\ncontinuation = sol_set[-1]\nL = len(continuation)\n\nplt.figure()\nfor ind, sol in enumerate(continuation):\n plt.plot(sol.t, sol.y[:, 0], color=(1*(ind/L), 1*(L-ind)/L, 0))\n plt.plot(sol.t, sol.y[:, 1], color=(0, 1*(L-ind)/L, 1*(ind/L)))\nplt.xlabel('Time [s]')\nplt.ylabel('State Variables')\nplt.grid(True)\n\nplt.figure()\nfor ind, sol in enumerate(continuation):\n plt.plot(sol.y[:, 0], sol.y[:, 1], color=(0, 1*(L-ind)/L, 1*(ind/L)))\nplt.xlabel('$y_1$')\nplt.ylabel('$y_2$')\nplt.grid(True)\n\nplt.figure()\nfor ind, sol in enumerate(continuation):\n plt.plot(sol.t, sol.u[:, 0], color=(0, 1*(L-ind)/L, 1*(ind/L)))\n\nplt.plot(traj.t, -traj.y[:,0]/6, color='k', linestyle='--')\nplt.plot(traj.t, -traj.y[:,0]/6 - 1, color='k', linestyle='--')\nplt.grid(True)\nplt.xlabel('Time [s]')\nplt.ylabel('Control')\n\nplt.figure()\nfor ind, sol in enumerate(continuation):\n plt.plot(sol.t, sol.dual[:, 0], color=(1*(ind/L), 1*(L-ind)/L, 0), label=r'$\\lambda_{y_1}$')\n plt.plot(sol.t, sol.dual[:, 1], color=(0, 1*(L-ind)/L, 1*(ind/L)), label=r'$\\lambda_{y_2}$')\nplt.grid(True)\nplt.xlabel('Time [s]')\nplt.ylabel('Costate Variables')\n\nplt.figure()\nfor ind, sol in enumerate(continuation):\n plt.plot(sol.t, sol.u[:, 0] + sol.y[:, 0]/6, color=(0, 1*(L-ind)/L, 1*(ind/L)))\n\nplt.plot([traj.t[0], traj.t[-1]], [0, 0], color='k', linestyle='--')\nplt.plot([traj.t[0], traj.t[-1]], [-1, -1], color='k', linestyle='--')\nplt.grid(True)\nplt.xlabel('Time [s]')\nplt.ylabel('Path-constraint')\nplt.show()\n" ]
[ [ "scipy.integrate.simps" ], [ "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ptrbortolotti/WISDEM
[ "2b7e44716d022e2f62140073dd078c5deeb8bf0a", "2b7e44716d022e2f62140073dd078c5deeb8bf0a", "2b7e44716d022e2f62140073dd078c5deeb8bf0a", "2b7e44716d022e2f62140073dd078c5deeb8bf0a" ]
[ "wisdem/drivetrainse/rna.py", "wisdem/aeroelasticse/Turbsim_mdao/wind_profile_writer.py", "wisdem/test/test_pymap/main.py", "wisdem/test/test_orbit/phases/install/cable_install/test_array_install.py" ]
[ "from __future__ import print_function\nimport numpy as np\nfrom openmdao.api import ExplicitComponent, Group, IndepVarComp\n\nfrom wisdem.commonse.utilities import hstack, vstack\nfrom wisdem.commonse.csystem import DirectionVector\nfrom wisdem.commonse import gravity\n\n# This is an extremely simple RNA mass calculator that should be used when DriveSE otherwise seems too complicated\n\n\nclass RNAMass(ExplicitComponent):\n def setup(self):\n\n # variables\n self.add_input('blades_mass', 0.0, units='kg', desc='mass of all blade')\n self.add_input('hub_mass', 0.0, units='kg', desc='mass of hub')\n self.add_input('nac_mass', 0.0, units='kg', desc='mass of nacelle')\n\n self.add_input('hub_cm', np.zeros(3), units='m', desc='location of hub center of mass relative to tower top in yaw-aligned c.s.')\n self.add_input('nac_cm', np.zeros(3), units='m', desc='location of nacelle center of mass relative to tower top in yaw-aligned c.s.')\n\n # order for all moments of inertia is (xx, yy, zz, xy, xz, yz) in the yaw-aligned coorinate system\n self.add_input('blades_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of all blades about hub center')\n self.add_input('hub_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of hub about its center of mass')\n self.add_input('nac_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of nacelle about its center of mass')\n\n # outputs\n self.add_output('rotor_mass', 0.0, units='kg', desc='mass of blades and hub')\n self.add_output('rna_mass', 0.0, units='kg', desc='total mass of RNA')\n self.add_output('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')\n self.add_output('rna_I_TT', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of RNA about tower top in yaw-aligned coordinate system')\n\n self.declare_partials('*','*')\n\n def _assembleI(self, I):\n Ixx, Iyy, Izz, Ixy, Ixz, Iyz = I[0], I[1], I[2], I[3], I[4], I[5] \n return np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])\n\n\n def _unassembleI(self, I):\n return np.array([I[0, 0], I[1, 1], I[2, 2], I[0, 1], I[0, 2], I[1, 2]])\n\n\n def compute(self, inputs, outputs):\n\n rotor_mass = inputs['blades_mass'] + inputs['hub_mass']\n nac_mass = inputs['nac_mass']\n\n # rna mass\n outputs['rotor_mass'] = rotor_mass\n outputs['rna_mass'] = rotor_mass + nac_mass\n\n # rna cm\n outputs['rna_cm'] = (rotor_mass*inputs['hub_cm'] + nac_mass*inputs['nac_cm'])/outputs['rna_mass']\n\n #TODO check if the use of assembleI and unassembleI functions are correct\n # rna I\n blades_I = self._assembleI(inputs['blades_I'])\n hub_I = self._assembleI(inputs['hub_I'])\n nac_I = self._assembleI(inputs['nac_I'])\n rotor_I = blades_I + hub_I\n\n R = inputs['hub_cm']\n rotor_I_TT = rotor_I + rotor_mass*(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n R = inputs['nac_cm']\n nac_I_TT = nac_I + inputs['nac_mass']*(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n outputs['rna_I_TT'] = self._unassembleI(rotor_I_TT + nac_I_TT)\n\n\n def compute_partials(self, inputs, J):\n\n blades_mass = inputs['blades_mass']\n hub_mass = inputs['hub_mass']\n nac_mass = inputs['nac_mass']\n hub_cm = inputs['hub_cm']\n nac_cm = inputs['nac_cm']\n hub_I = inputs['hub_I']\n nac_I = inputs['nac_I']\n rotor_mass = blades_mass+hub_mass\n rna_mass = rotor_mass + nac_mass\n\n \n\n # mass\n J['rotor_mass', 'blades_mass'] = 1.0\n J['rotor_mass', 'hub_mass'] = 1.0\n J['rotor_mass', 'nac_mass'] = 0.0\n J['rotor_mass', 'hub_cm'] = np.zeros(3)\n 
J['rotor_mass', 'nac_cm'] = np.zeros(3)\n J['rotor_mass', 'blades_I'] = np.zeros(6)\n J['rotor_mass', 'hub_I'] = np.zeros(6)\n J['rotor_mass', 'nac_I'] = np.zeros(6)\n\n J['rna_mass', 'blades_mass'] = 1.0\n J['rna_mass', 'hub_mass'] = 1.0\n J['rna_mass', 'nac_mass'] = 1.0\n J['rna_mass', 'hub_cm'] = np.zeros(3)\n J['rna_mass', 'nac_cm'] = np.zeros(3)\n J['rna_mass', 'blades_I'] = np.zeros(6)\n J['rna_mass', 'hub_I'] = np.zeros(6)\n J['rna_mass', 'nac_I'] = np.zeros(6)\n \n\n # cm\n numerator = (blades_mass+hub_mass)*hub_cm+nac_mass*nac_cm\n\n J['rna_cm', 'blades_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2\n J['rna_cm', 'hub_mass'] = (rna_mass*hub_cm-numerator)/rna_mass**2\n J['rna_cm', 'nac_mass'] = (rna_mass*nac_cm-numerator)/rna_mass**2\n J['rna_cm', 'hub_cm'] = rotor_mass/rna_mass*np.eye(3)\n J['rna_cm', 'nac_cm'] = nac_mass/rna_mass*np.eye(3)\n J['rna_cm', 'blades_I'] = np.zeros((3, 6))\n J['rna_cm', 'hub_I'] = np.zeros((3, 6))\n J['rna_cm', 'nac_I'] = np.zeros((3, 6))\n\n\n # I\n R = hub_cm\n const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n\n J['rna_I_TT', 'blades_mass'] = const\n J['rna_I_TT', 'hub_mass'] = const\n dI_drx = rotor_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))\n dI_dry = rotor_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))\n dI_drz = rotor_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))\n J['rna_I_TT', 'hub_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T\n\n R = nac_cm\n const = self._unassembleI(np.dot(R, R)*np.eye(3) - np.outer(R, R))\n J['rna_I_TT', 'nac_mass'] = const\n dI_drx = nac_mass*self._unassembleI(2*R[0]*np.eye(3) - np.array([[2*R[0], R[1], R[2]], [R[1], 0.0, 0.0], [R[2], 0.0, 0.0]]))\n dI_dry = nac_mass*self._unassembleI(2*R[1]*np.eye(3) - np.array([[0.0, R[0], 0.0], [R[0], 2*R[1], R[2]], [0.0, R[2], 0.0]]))\n dI_drz = nac_mass*self._unassembleI(2*R[2]*np.eye(3) - np.array([[0.0, 0.0, R[0]], [0.0, 0.0, R[1]], [R[0], R[1], 2*R[2]]]))\n J['rna_I_TT', 'nac_cm'] = np.vstack([dI_drx, dI_dry, dI_drz]).T\n\n J['rna_I_TT', 'blades_I'] = np.eye(6)\n J['rna_I_TT', 'hub_I'] = np.eye(6)\n J['rna_I_TT', 'nac_I'] = np.eye(6)\n\n \n\n\nclass RotorLoads(ExplicitComponent):\n def setup(self):\n\n # variables\n self.add_input('F', np.zeros(3), units='N', desc='forces in hub-aligned coordinate system')\n self.add_input('M', np.zeros(3), units='N*m', desc='moments in hub-aligned coordinate system')\n self.add_input('hub_cm', np.zeros(3), units='m', desc='position of rotor hub relative to tower top in yaw-aligned c.s.')\n self.add_input('rna_mass', 0.0, units='kg', desc='mass of rotor nacelle assembly')\n self.add_input('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')\n\n # # These are used for backwards compatibility - do not use\n # T = Float(iotype='in', desc='thrust in hub-aligned coordinate system') # THIS MEANS STILL YAWED THOUGH (Shaft tilt)\n # Q = Float(iotype='in', desc='torque in hub-aligned coordinate system')\n\n # parameters\n self.add_discrete_input('downwind', False)\n self.add_input('tilt', 0.0, units='deg')\n\n # out\n self.add_output('top_F', np.zeros(3), units='N') # in yaw-aligned\n self.add_output('top_M', np.zeros(3), units='N*m')\n\n self.declare_partials('top_F', ['F','M','hub_cm','rna_mass','rna_cm'])\n self.declare_partials('top_M', 
['F','M','hub_cm','rna_mass','rna_cm'])\n\n\n def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):\n\n F = inputs['F']\n M = inputs['M']\n tilt = float(inputs['tilt'])\n \n F = DirectionVector.fromArray(F).hubToYaw(tilt)\n M = DirectionVector.fromArray(M).hubToYaw(tilt)\n\n # change x-direction if downwind\n hub_cm = np.copy(inputs['hub_cm'])\n rna_cm = np.copy(inputs['rna_cm'])\n if discrete_inputs['downwind']:\n hub_cm[0] *= -1\n rna_cm[0] *= -1\n hub_cm = DirectionVector.fromArray(hub_cm)\n rna_cm = DirectionVector.fromArray(rna_cm)\n self.save_rhub = hub_cm\n self.save_rcm = rna_cm\n\n # aerodynamic moments\n M = M + hub_cm.cross(F)\n self.saveF = F\n\n '''\n Removing this permanently gbarter 1/2020 because of too much confusion in TowerSE and Frame3DD\n From now on TowerSE will always add to loading of added mass items, including RNA\n \n # add weight loads\n F_w = DirectionVector(0.0, 0.0, -float(inputs['rna_mass'])*gravity)\n M_w = rna_cm.cross(F_w)\n self.saveF_w = F_w\n\n Fout = F + F_w\n\n if discrete_inputs['rna_weightM']:\n Mout = M + M_w\n else:\n Mout = M\n #REMOVE WEIGHT EFFECT TO ACCOUNT FOR P-Delta Effect\n print(\"!!!! No weight effect on rotor moments -TowerSE !!!!\")\n '''\n Fout = F\n Mout = M\n\n # put back in array\n outputs['top_F'] = np.array([Fout.x, Fout.y, Fout.z])\n outputs['top_M'] = np.array([Mout.x, Mout.y, Mout.z])\n\n def compute_partials(self, inputs, J, discrete_inputs):\n\n dF = DirectionVector.fromArray(inputs['F']).hubToYaw(inputs['tilt'])\n dFx, dFy, dFz = dF.dx, dF.dy, dF.dz\n\n dtopF_dFx = np.array([dFx['dx'], dFy['dx'], dFz['dx']])\n dtopF_dFy = np.array([dFx['dy'], dFy['dy'], dFz['dy']])\n dtopF_dFz = np.array([dFx['dz'], dFy['dz'], dFz['dz']])\n dtopF_dF = hstack([dtopF_dFx, dtopF_dFy, dtopF_dFz])\n dtopF_w_dm = np.array([0.0, 0.0, -gravity])\n\n #dtopF = hstack([dtopF_dF, np.zeros((3, 6)), dtopF_w_dm, np.zeros((3, 3))])\n\n\n dM = DirectionVector.fromArray(inputs['M']).hubToYaw(inputs['tilt'])\n dMx, dMy, dMz = dM.dx, dM.dy, dM.dz\n dMxcross, dMycross, dMzcross = self.save_rhub.cross_deriv(self.saveF, 'dr', 'dF')\n\n dtopM_dMx = np.array([dMx['dx'], dMy['dx'], dMz['dx']])\n dtopM_dMy = np.array([dMx['dy'], dMy['dy'], dMz['dy']])\n dtopM_dMz = np.array([dMx['dz'], dMy['dz'], dMz['dz']])\n dtopM_dM = hstack([dtopM_dMx, dtopM_dMy, dtopM_dMz])\n dM_dF = np.array([dMxcross['dF'], dMycross['dF'], dMzcross['dF']])\n\n dtopM_dFx = np.dot(dM_dF, dtopF_dFx)\n dtopM_dFy = np.dot(dM_dF, dtopF_dFy)\n dtopM_dFz = np.dot(dM_dF, dtopF_dFz)\n dtopM_dF = hstack([dtopM_dFx, dtopM_dFy, dtopM_dFz])\n dtopM_dr = np.array([dMxcross['dr'], dMycross['dr'], dMzcross['dr']])\n\n #dMx_w_cross, dMy_w_cross, dMz_w_cross = self.save_rcm.cross_deriv(self.saveF_w, 'dr', 'dF')\n\n #if discrete_inputs['rna_weightM']:\n # dtopM_drnacm = np.array([dMx_w_cross['dr'], dMy_w_cross['dr'], dMz_w_cross['dr']])\n # dtopM_dF_w = np.array([dMx_w_cross['dF'], dMy_w_cross['dF'], dMz_w_cross['dF']])\n #else:\n # dtopM_drnacm = np.zeros((3, 3))\n # dtopM_dF_w = np.zeros((3, 3))\n dtopM_drnacm = np.zeros((3, 3))\n dtopM_dF_w = np.zeros((3, 3))\n dtopM_dm = np.dot(dtopM_dF_w, dtopF_w_dm)\n\n if discrete_inputs['downwind']:\n dtopM_dr[:, 0] *= -1\n dtopM_drnacm[:, 0] *= -1\n\n #dtopM = hstack([dtopM_dF, dtopM_dM, dtopM_dr, dtopM_dm, dtopM_drnacm])\n\n \n J['top_F', 'F'] = dtopF_dF\n J['top_F', 'M'] = np.zeros((3, 3))\n J['top_F', 'hub_cm'] = np.zeros((3, 3))\n J['top_F', 'rna_mass'] = dtopF_w_dm\n J['top_F', 'rna_cm'] = np.zeros((3, 3))\n\n J['top_M', 'F'] = dtopM_dF\n 
J['top_M', 'M'] = dtopM_dM\n J['top_M', 'hub_cm'] = dtopM_dr\n J['top_M', 'rna_mass'] = dtopM_dm\n J['top_M', 'rna_cm'] = dtopM_drnacm\n\n \n\n\nclass RNA(Group):\n def initialize(self):\n self.options.declare('nLC')\n \n def setup(self):\n nLC = self.options['nLC']\n \n self.add_subsystem('mass', RNAMass(), promotes=['*'])\n for k in range(nLC):\n lc = '' if nLC==1 else str(k+1)\n self.add_subsystem('loads'+lc, RotorLoads(), promotes=['rna_mass','rna_cm','hub_cm','downwind','tilt'])\n\n \n", "import numpy as np\nimport os\nimport decimal\n# import pandas\nfrom collections import OrderedDict\n\ndef write_wind(V_ref, alpha, Beta, Z_hub, filename, template_file):\n\n Height=(np.array([np.arange(0,181,10)],dtype=float))\n \n new_Height=(Height/Z_hub).T\n \n Height=(np.array([np.arange(0,181,10)])).T\n a=len(Height)\n \n \n U=np.zeros((a,1),dtype=float)\n Beta1=np.zeros((a,1),dtype=float)\n \n \n for i in range(0,a):\n U[i,0]= V_ref*(new_Height[i,0])**alpha\n Beta1[i,0]= (Beta/63)*(Height[i,0])-90*(Beta/63)\n \n \n df1= ['%.3f'% x for x in Height]\n df2 = ['%.3f'% x for x in U]\n df3 =['%.3f'% x for x in Beta1]\n \n\n with open(template_file,'r') as f:\n get_all=f.readlines() \n \n with open(filename,'w') as f2:\n for i,line in enumerate(get_all,1):\n if i < 12: \n f2.writelines(line)\n else:\n for p in range(len(df1)):\n if len(str(df1[p]))<5 :\n f2.write(str(df1[p]) + \" \" + str(df2[p]) + \" \" + str(df3[p]) + \"\\n\")\n else:\n f2.write(str(df1[p]) + \" \" + str(df2[p]) + \" \" + str(df3[p]) + \"\\n\")\n break\n f2.close()\n \n\n\n", "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# Copyright (C) 2014 mdm \n# map[dot]plus[dot]plus[dot]help[at]gmail \n# \n# Licensed to the Apache Software Foundation (ASF) under one \n# or more contributor license agreements. See the NOTICE file \n# distributed with this work for additional information \n# regarding copyright ownership. The ASF licenses this file \n# to you under the Apache License, Version 2.0 (the \n# \"License\"); you may not use this file except in compliance \n# with the License. You may obtain a copy of the License at \n# \n# http://www.apache.org/licenses/LICENSE-2.0 \n# \n# Unless required by applicable law or agreed to in writing, \n# software distributed under the License is distributed on an \n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY \n# KIND, either express or implied. See the License for the \n# specific language governing permissions and limitations \n# under the License. 
\n\nfrom wisdem.pymap import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\nif __name__ == '__main__': \n np.set_printoptions(formatter={'float': '{: 13.1f}'.format},linewidth=100)\n\n mooring_1 = pyMAP( )\n \n mooring_1.map_set_sea_depth(320)\n mooring_1.map_set_gravity(9.81)\n mooring_1.map_set_sea_density(1025.0)\n \n # mooring_1.read_file(\"input.map\") # 350 m depth \n # mooring_1.read_file(\"inwind_new.map\")\n mooring_1.read_file(\"unit_example.map\")\n # mooring_1.read_file(\"baseline_1.map\") # 120 m depth \n # mooring_1.read_file(\"baseline_2.map\") # 350 m depth \n # mooring_1.read_file(\"baseline_5.map\") # 80 m depth\n # mooring_1.read_file(\"NRELOffshrBsline5MW_Platform_OC3Hywind.map\") # 320 m depth\n # mooring_1.read_file(\"NRELOffshrBsline5MW_Platform_OC3Hywind_segmented.map\") # 320 m depth\n #mooring_1.read_file(\"NRELOffshrBsLine5MW_OC4.map\") # 200 m depth\n # mooring_1.read_file(\"NRELOffshrBsLine5MW_TLP.map\") # 200 m depth\n\n # mooring_1.summary_file('name_me.txt')\n mooring_1.init( )\n\n epsilon = 1e-5\n K = mooring_1.linear(epsilon) \n print(\"\\nHere is the linearized stiffness matrix with zero vessel displacement:\")\n print(np.array(K))\n\n #mooring_1.displace_vessel(5,0,0,0,0,0)\n #mooring_1.update_states(0.0,0)\n\n #mooring_1.displace_vessel(17,0,0,0,0,0)\n #mooring_1.update_states(0.0,0)\n\n # H,V = mooring_1.get_fairlead_force_2d(0) \n # print H, \" \", V\n \n # fx,fy,fz = mooring_1.get_fairlead_force_3d(0) \n # print fx, \" \", fy, \" \", fz\n # \n # ''' \n # function residual at (hopefully) the solution\n # '''\n # \n # print mooring_1.funch(0) \n # print mooring_1.funcl(0)\n # \n # '''\n # derivatives at solution\n # '''\n # print mooring_1.dxdh(0)\n # print mooring_1.dxdv(0) \n # print mooring_1.dzdh(0)\n # print mooring_1.dzdv(0)\n # \n # print mooring_1.dxdh(1)\n # print mooring_1.dxdv(1) \n # print mooring_1.dzdh(1)\n # print mooring_1.dzdv(1)\n\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(0,mooring_1.size_lines()):\n x = mooring_1.plot_x( i, 20 )\n y = mooring_1.plot_y( i, 20 )\n z = mooring_1.plot_z( i, 20 ) \n ax.plot(x,y,z,'b-')\n \n ax.set_xlabel('X [m]')\n ax.set_ylabel('Y [m]')\n ax.set_zlabel('Z [m]') \n #ax.set_xlim([-3.0,3]) \n #ax.set_ylim([-3.0,3]) \n #ax.set_zlim([-3.0,0]) \n \n plt.show()\n \n mooring_1.end( )\n", "\"\"\"\nTesting framework for the `ArrayCableInstallation` class.\n\"\"\"\n\n__author__ = [\"Rob Hammond\", \"Jake Nunemaker\"]\n__copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\"\n__maintainer__ = \"Jake Nunemaker\"\n__email__ = \"[email protected]\"\n\n\nfrom copy import deepcopy\n\nimport pandas as pd\nimport pytest\n\nfrom wisdem.test.test_orbit.data import test_weather\nfrom wisdem.orbit.library import extract_library_specs\nfrom wisdem.orbit.core._defaults import process_times as pt\nfrom wisdem.orbit.phases.install import ArrayCableInstallation\n\nbase_config = extract_library_specs(\"config\", \"array_cable_install\")\nsimul_config = deepcopy(base_config)\n_ = simul_config.pop(\"array_cable_bury_vessel\")\n\n\[email protected](\n \"config\", (base_config, simul_config), ids=[\"separate\", \"simultaneous\"]\n)\ndef test_simulation_setup(config):\n\n sim = ArrayCableInstallation(config)\n assert sim.env\n\n\[email protected](\n \"config\", (base_config, simul_config), ids=[\"separate\", \"simultaneous\"]\n)\ndef test_vessel_initialization(config):\n\n sim = ArrayCableInstallation(config)\n assert sim.install_vessel\n assert 
sim.install_vessel.cable_storage\n\n if config.get(\"array_cable_bury_vessel\", None):\n assert sim.bury_vessel\n\n\[email protected](\n \"config\", (base_config, simul_config), ids=[\"separate\", \"simultaneous\"]\n)\[email protected](\n \"weather\", (None, test_weather), ids=[\"no_weather\", \"test_weather\"]\n)\ndef test_for_complete_logging(config, weather):\n\n sim = ArrayCableInstallation(config, weather=weather)\n sim.run()\n\n df = pd.DataFrame(sim.env.actions)\n df = df.loc[df[\"action\"] != \"Mobilize\"].reset_index(drop=True)\n df = df.assign(shift=(df[\"time\"] - df[\"time\"].shift(1)))\n\n for vessel in df[\"agent\"].unique():\n _df = df[df[\"agent\"] == vessel].copy()\n _df = _df.assign(shift=(_df[\"time\"] - _df[\"time\"].shift(1)))\n assert (_df[\"shift\"] - _df[\"duration\"]).fillna(0.0).abs().max() < 1e-9\n\n assert ~df[\"cost\"].isnull().any()\n _ = sim.agent_efficiencies\n _ = sim.detailed_output\n\n\ndef test_simultaneous_speed_kwargs():\n\n sim = ArrayCableInstallation(simul_config)\n sim.run()\n baseline = sim.total_phase_time\n\n key = \"cable_lay_bury_speed\"\n val = pt[key] * 0.1\n\n kwargs = {key: val}\n\n sim = ArrayCableInstallation(simul_config, **kwargs)\n sim.run()\n\n assert sim.total_phase_time > baseline\n\n\ndef test_separate_speed_kwargs():\n\n sim = ArrayCableInstallation(base_config)\n sim.run()\n df = pd.DataFrame(sim.env.actions)\n\n base_lay = sum(df.loc[df[\"action\"] == \"Lay Cable\"][\"duration\"])\n base_bury = sum(df.loc[df[\"action\"] == \"Bury Cable\"][\"duration\"])\n\n kwargs = {\n \"cable_lay_speed\": pt[\"cable_lay_speed\"] * 0.1,\n \"cable_bury_speed\": pt[\"cable_bury_speed\"] * 0.1,\n }\n\n new = ArrayCableInstallation(base_config, **kwargs)\n new.run()\n df = pd.DataFrame(new.env.actions)\n\n new_lay = sum(df.loc[df[\"action\"] == \"Lay Cable\"][\"duration\"])\n assert new_lay > base_lay\n\n new_bury = sum(df.loc[df[\"action\"] == \"Bury Cable\"][\"duration\"])\n assert new_bury > base_bury\n\n\ndef test_kwargs_for_export_install():\n\n sim = ArrayCableInstallation(base_config)\n sim.run()\n baseline = sim.total_phase_time\n\n keywords = [\n \"cable_load_time\",\n \"site_position_time\",\n \"cable_prep_time\",\n \"cable_lower_time\",\n \"cable_pull_in_time\",\n \"cable_termination_time\",\n ]\n\n failed = []\n\n for kw in keywords:\n\n default = pt[kw]\n\n if \"speed\" in kw:\n _new = default - 0.05\n\n if _new <= 0:\n raise Exception(f\"'{kw}' is less than 0.\")\n\n kwargs = {kw: _new}\n\n else:\n kwargs = {kw: default + 2}\n\n new_sim = ArrayCableInstallation(base_config, **kwargs)\n new_sim.run()\n new_time = new_sim.total_phase_time\n\n if new_time > baseline:\n pass\n\n else:\n failed.append(kw)\n\n if failed:\n raise Exception(f\"ExpInstall: '{failed}' not affecting results.\")\n\n else:\n assert True\n" ]
[ [ "numpy.dot", "numpy.eye", "numpy.copy", "numpy.outer", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.arange", "numpy.zeros" ], [ "numpy.array", "numpy.set_printoptions", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
baajur/google-research
[ "9049acf9246c1b75170f0c6757e62a8f619a9db6" ]
[ "kws_streaming/layers/stream_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for kws_streaming.layers.stream.\"\"\"\n\nimport numpy as np\nfrom kws_streaming.layers import stream\nfrom kws_streaming.layers.compat import tf\nfrom kws_streaming.layers.compat import tf1\nfrom kws_streaming.layers.modes import Modes\nfrom kws_streaming.models import utils\ntf1.disable_eager_execution()\n\n\n# Toy example which require signal processing in time\nclass Sum(tf.keras.layers.Layer):\n \"\"\"Applies Sum on time_dim.\"\"\"\n\n def __init__(self, time_dim=1, **kwargs):\n super(Sum, self).__init__(**kwargs)\n self.time_dim = time_dim\n\n def call(self, inputs):\n return tf.keras.backend.sum(inputs, axis=self.time_dim)\n\n def get_config(self):\n config = {\"time_dim\": self.time_dim}\n base_config = super(Sum, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass StreamTest(tf.test.TestCase):\n\n def test_streaming_with_effective_tdim(self):\n time_size = 10\n feature_size = 3\n batch_size = 1\n\n time_dim = 1 # index of time dimensions\n ring_buffer_size_in_time_dim = 3 # effective size of aperture in time dim\n\n inputs = tf.keras.layers.Input(\n shape=(time_size, feature_size),\n batch_size=batch_size,\n name=\"inp_sequence\")\n\n mode = Modes.TRAINING\n\n # in streaming mode it will create a\n # ring buffer with time dim size ring_buffer_size_in_time_dim\n outputs = stream.Stream(\n cell=Sum(time_dim=time_dim),\n mode=mode,\n ring_buffer_size_in_time_dim=ring_buffer_size_in_time_dim)(inputs)\n model_train = tf.keras.Model(inputs, outputs)\n model_train.summary()\n\n mode = Modes.STREAM_EXTERNAL_STATE_INFERENCE\n input_tensors = [\n tf.keras.layers.Input(\n shape=(\n 1, # time dim is size 1 in streaming mode\n feature_size,\n ), batch_size=batch_size, name=\"inp_stream\")\n ]\n # convert non streaming model to streaming one\n model_stream = utils.convert_to_inference_model(model_train,\n input_tensors, mode)\n model_stream.summary()\n\n # second input tostream model is a state, so we can use its shape\n input_state_np = np.zeros(model_stream.inputs[1].shape, dtype=np.float32)\n\n # input test data\n non_stream_input = np.random.randint(\n 1, 10, size=(batch_size, time_size, feature_size))\n\n # run streaming inference\n # iterate over time dim sample by sample\n for i in range(input_state_np.shape[1]):\n input_stream_np = np.expand_dims(non_stream_input[0][i], 0)\n input_stream_np = np.expand_dims(input_stream_np, 1)\n input_stream_np = input_stream_np.astype(np.float32)\n output_stream_np, output_state_np = model_stream.predict(\n [input_stream_np, input_state_np])\n input_state_np = output_state_np # update input state\n\n # emulate sliding window summation\n target = np.sum(\n non_stream_input[:, max(0, i - ring_buffer_size_in_time_dim):i + 1],\n axis=time_dim)\n self.assertAllEqual(target, output_stream_np)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "numpy.expand_dims", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michaelsilverstein/scikit-bio
[ "876efcf688a8f15e89bb70fa835a2f2a84b534c1" ]
[ "skbio/stats/distance/tests/test_anosim.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport io\nfrom functools import partial\nfrom unittest import TestCase, main\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util.testing import assert_series_equal\n\nfrom skbio import DistanceMatrix\nfrom skbio.stats.distance import anosim\n\n\nclass TestANOSIM(TestCase):\n \"\"\"All results were verified with R (vegan::anosim).\"\"\"\n\n def setUp(self):\n # Distance matrices with and without ties in the ranks, with 2 groups\n # of equal size.\n dm_ids = ['s1', 's2', 's3', 's4']\n self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']\n self.df = pd.read_csv(\n io.StringIO('ID,Group\\ns2,Control\\ns3,Fast\\ns4,Fast\\ns5,Control\\n'\n 's1,Control'), index_col=0)\n\n self.dm_ties = DistanceMatrix([[0, 1, 1, 4],\n [1, 0, 3, 2],\n [1, 3, 0, 3],\n [4, 2, 3, 0]], dm_ids)\n\n self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],\n [1, 0, 3, 2],\n [5, 3, 0, 3],\n [4, 2, 3, 0]], dm_ids)\n\n # Test with 3 groups of unequal size. This data also generates a\n # negative R statistic.\n self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',\n 'Treatment1', 'Control', 'Control']\n\n # Equivalent grouping but with different labels -- groups should be\n # assigned different integer labels but results should be the same.\n self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']\n\n self.dm_unequal = DistanceMatrix(\n [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],\n [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],\n [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],\n [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],\n [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],\n [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],\n ['s1', 's2', 's3', 's4', 's5', 's6'])\n\n # Expected series index is the same across all tests.\n self.exp_index = ['method name', 'test statistic name', 'sample size',\n 'number of groups', 'test statistic', 'p-value',\n 'number of permutations']\n\n # Stricter series equality testing than the default.\n self.assert_series_equal = partial(assert_series_equal,\n check_index_type=True,\n check_series_type=True)\n\n def test_ties(self):\n # Ensure we get the same results if we rerun the method using the same\n # inputs. 
Also ensure we get the same results if we run the method\n # using a grouping vector or a data frame with equivalent groupings.\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999],\n name='ANOSIM results')\n\n for _ in range(2):\n np.random.seed(0)\n obs = anosim(self.dm_ties, self.grouping_equal)\n self.assert_series_equal(obs, exp)\n\n for _ in range(2):\n np.random.seed(0)\n obs = anosim(self.dm_ties, self.df, column='Group')\n self.assert_series_equal(obs, exp)\n\n def test_no_ties(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999],\n name='ANOSIM results')\n np.random.seed(0)\n obs = anosim(self.dm_no_ties, self.grouping_equal)\n self.assert_series_equal(obs, exp)\n\n def test_no_permutations(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0],\n name='ANOSIM results')\n obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0)\n self.assert_series_equal(obs, exp)\n\n def test_unequal_group_sizes(self):\n exp = pd.Series(index=self.exp_index,\n data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999],\n name='ANOSIM results')\n\n np.random.seed(0)\n obs = anosim(self.dm_unequal, self.grouping_unequal)\n self.assert_series_equal(obs, exp)\n\n np.random.seed(0)\n obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled)\n self.assert_series_equal(obs, exp)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.Series", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
eldar/acsm
[ "04069e8bb4c12185473dc10c3355e5367fa98968", "04069e8bb4c12185473dc10c3355e5367fa98968" ]
[ "acsm/benchmark/pck_eval.py", "acsm/data/base2.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport os.path as osp\nimport numpy as np\nimport pprint\nimport pdb\nfrom . import evaluate_pr\nimport scipy.io as sio\n'''\nintervals : Define thresholds to evaluate pck score \nkpnames : Keypoint names\nbench_stats : stats\n'''\n\n\ndef remove_nans(x):\n return x[~np.isnan(x)]\n\n\ndef pck_at_intervals(intervals, error):\n accuracy = []\n for interval in intervals:\n accuracy.append(float(np.round(np.mean(np.array(error) < interval), 3)))\n return accuracy\n\n\ndef ck_at_interval(intervals, error):\n cks = []\n for interval in intervals:\n cks.append(np.array(error) < interval)\n return cks # len(intervals) x error.shape\n\n\ndef benchmark_all_instances(intervals, kpnames, bench_stats, img_size):\n stats = {}\n plot_intervals = [0.025 * i for i in range(40)]\n kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1\n pdb.set_trace()\n # valid_inds =\n kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'\n bench_stats_kps_err = bench_stats['kps_err'] / img_size\n mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask\n stats['mean_kp_err'] = [\n float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)\n ]\n stats['median_kp_err'] = [\n float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)\n ]\n stats['std_kp_err'] = [\n float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)\n ]\n stats['data'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n stats['kp_names'] = kpnames\n stats['eval_params'] = {}\n\n for kpx, kp_name in enumerate(kpnames):\n stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])\n stats['data'][kp_name].sort()\n stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]\n stats['pck'][kp_name] = pck_at_intervals(\n intervals, stats['data'][kp_name]\n )\n stats['eval_params'][kp_name] = {}\n stats['eval_params'][kp_name]['thresh'] = plot_intervals\n stats['eval_params'][kp_name]['acc'] = pck_at_intervals(\n plot_intervals, stats['data'][kp_name]\n )\n\n return stats\n\n\ndef benchmark_all_instances_2(\n intervals, kpnames, bench_stats, img_size, select_kp_ids=None\n):\n stats = {}\n plot_intervals = [0.025 * i for i in range(40)]\n kp_error_nan_mask = bench_stats['kps_err'][:, :, 1] * 1\n\n # valid_inds =\n kp_error_nan_mask[kp_error_nan_mask < 0.5] = 'nan'\n bench_stats_kps_err = bench_stats['kps_err'] / img_size\n mean_kp_error = bench_stats_kps_err[:, :, 0] * kp_error_nan_mask\n stats['mean_kp_err'] = [\n float(t) for t in np.round(np.nanmean(mean_kp_error, 0), 4)\n ]\n stats['median_kp_err'] = [\n float(t) for t in np.round(np.nanmedian(mean_kp_error, 0), 4)\n ]\n stats['std_kp_err'] = [\n float(t) for t in np.round(np.nanstd(mean_kp_error, 0), 4)\n ]\n stats['data'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n stats['kp_names'] = kpnames\n stats['eval_params'] = {}\n\n for kpx, kp_name in enumerate(kpnames):\n stats['data'][kp_name] = remove_nans(mean_kp_error[:, kpx])\n stats['data'][kp_name].sort()\n stats['data'][kp_name] = [float(t) for t in stats['data'][kp_name]]\n stats['pck'][kp_name] = pck_at_intervals(\n intervals, stats['data'][kp_name]\n )\n stats['eval_params'][kp_name] = {}\n stats['eval_params'][kp_name]['thresh'] = plot_intervals\n stats['eval_params'][kp_name]['acc'] = pck_at_intervals(\n plot_intervals, stats['data'][kp_name]\n )\n\n # pdb.set_trace()\n if select_kp_ids is not None:\n for group_name in select_kp_ids.keys():\n kp_ids = select_kp_ids[group_name]\n 
select_kp_error = mean_kp_error[:, kp_ids]\n samples = remove_nans(select_kp_error.reshape(-1))\n stats['eval_params'][\n '{}_acc'.format(group_name)\n ] = pck_at_intervals(intervals, samples.tolist())\n\n samples = remove_nans(mean_kp_error.reshape(-1))\n stats['eval_params']['acc'] = pck_at_intervals(intervals, samples.tolist())\n return stats\n\n\ndef benchmark_vis_instances(\n intervals, dist_thresholds, kpnames, bench_stats, img_size\n):\n stats = {}\n stats['data'] = {}\n stats['eval_params'] = {}\n stats['pck'] = {}\n stats['interval'] = intervals\n bench_stats_kps_error = 1 * bench_stats['kps_err']\n bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size\n ndata_points, nkps, _ = bench_stats['kps_err'].shape\n\n kps_vis1 = bench_stats['kps1'][:, :, 2] > 200\n kps_vis2 = bench_stats['kps2'][:, :, 2] > 200\n stats['eval_params']['total'] = np.sum(kps_vis1, axis=0) + 1E-10\n for dx, dist_thresh in enumerate(dist_thresholds):\n stats['eval_params'][dx] = {}\n stats['eval_params'][dx]['correct'] = np.zeros(\n (len(kpnames), len(intervals))\n )\n for kpx, kp_name in enumerate(kpnames):\n valid_inds = np.where(\n bench_stats_kps_error[:, kpx, 2] < dist_thresh\n )[0].tolist()\n common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5\n )[0].tolist()\n valid_inds = set(valid_inds)\n common_inds = set(common_inds)\n ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])\n ck = np.stack(ck, axis=1)\n ex = np.array(list(common_inds & valid_inds))\n if len(ex) > 0:\n stats['eval_params'][dx]['correct'][kpx] += np.sum(\n ck[ex, :], axis=0\n )\n\n kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]\n kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]\n ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))\n ).astype(np.int)\n if len(ex) > 0:\n stats['eval_params'][dx]['correct'][kpx] += np.sum(\n bench_stats_kps_error[ex, kpx, 2] > dist_thresh\n )\n stats['eval_params'][dx]['acc'] = stats['eval_params'][dx]['correct'] / \\\n stats['eval_params']['total'].reshape(-1, 1)\n return stats\n\n\ndef collate_all_instances(intervals, kp_names, bench_stats, img_size):\n bench_stats_kps_error = bench_stats['kps_err'] * 1\n bench_stats_kps_error[:, :, 0] = bench_stats_kps_error[:, :, 0] / img_size\n prediction_error = [] # N x 1\n prediction_score = [] # N x 1\n prediction_label = [] # N x len(intervals)\n gt_label = []\n\n kps_vis1 = bench_stats['kps1'][:, :, 2] > 200\n kps_vis2 = bench_stats['kps2'][:, :, 2] > 200\n\n for kpx, kp_name in enumerate(kp_names):\n common_inds = np.where(bench_stats_kps_error[:, kpx, 1] > 0.5\n )[0].tolist()\n ck = ck_at_interval(intervals, bench_stats_kps_error[:, kpx, 0])\n ck = np.stack(ck, axis=1)\n ex = np.array(list(common_inds))\n if len(ex) > 0:\n prediction_error.append(bench_stats_kps_error[ex, kpx, 0])\n prediction_score.append(bench_stats_kps_error[ex, kpx, 2])\n prediction_label.append(ck[ex, :] * 1)\n gt_label.append(ck[ex, :] * 0 + 1)\n\n kps_vis1_ind = np.where(kps_vis1[:, kpx])[0]\n kps_vis2_ind = np.where(kps_vis2[:, kpx])[0]\n ex = np.array(list(set(kps_vis1_ind) - set(kps_vis2_ind))\n ).astype(np.int)\n if len(ex) > 0:\n prediction_error.append(bench_stats_kps_error[ex, kpx, 0])\n prediction_score.append(bench_stats_kps_error[ex, kpx, 2])\n prediction_label.append(ck[ex, :] * 0)\n gt_label.append(ck[ex, :] * 0)\n\n prediction_error = np.concatenate(prediction_error, axis=0)\n prediction_score = np.concatenate(prediction_score, axis=0)\n prediction_label = np.concatenate(prediction_label, axis=0)\n gt_label = np.concatenate(gt_label, 
axis=0)\n\n stats = {}\n stats['pred_label'] = prediction_label\n stats['gt_label'] = gt_label\n stats['score'] = prediction_score # lower the score better it is.\n return stats\n\n\nkp_eval_thresholds = [0.05, 0.1, 0.2]\n# kp_eval_thresholds = [0.05, 1.0]\n'''\nselect_kp_ids dict is a group of kp points\n'''\n\n\ndef run_evaluation(\n bench_stats, n_iter, results_dir, img_size, kp_names, dist_thresholds,\n select_kp_ids\n):\n json_file = osp.join(results_dir, 'stats_m1_{}.json'.format(n_iter))\n stats_m1 = benchmark_all_instances_2(\n kp_eval_thresholds, kp_names, bench_stats, img_size, select_kp_ids\n )\n stats = stats_m1\n print(' Method 1 | Keypoint | Median Err | Mean Err | STD Err')\n pprint.pprint(\n zip(\n stats['kp_names'], stats['median_kp_err'], stats['mean_kp_err'],\n stats['std_kp_err']\n )\n )\n print('PCK Values')\n pprint.pprint(stats['interval'])\n pprint.pprint(stats['pck'])\n mean_pck = {}\n # pdb.set_trace()\n for i, thresh in enumerate(stats['interval']):\n mean_pck[thresh] = []\n for kp_name in kp_names:\n mean_pck[thresh].append(stats['pck'][kp_name][i])\n\n mean_pck = {k: np.mean(np.array(t)) for k, t in mean_pck.items()}\n pprint.pprint('Mean PCK ')\n pprint.pprint(mean_pck)\n\n print('Instance Average **** ')\n pprint.pprint(stats['eval_params']['acc'])\n for group_name in select_kp_ids.keys():\n print('Instance Average {} **** '.format(group_name))\n pprint.pprint(stats['eval_params']['{}_acc'.format(group_name)])\n\n print('########################## ')\n\n with open(json_file, 'w') as f:\n json.dump(stats, f)\n\n if dist_thresholds is not None:\n stats_m1 = benchmark_vis_instances(\n kp_eval_thresholds, dist_thresholds, kp_names, bench_stats, img_size\n )\n stats = stats_m1\n\n mean_pck = {}\n # points_per_kp = {k: v for k, v in zip(kp_names, stats['eval_params'][0]['npoints'])}\n # points_per_thresh = np.sum(np.array(points_per_kp.values()))\n for dx, thresh in enumerate(dist_thresholds):\n mean_pck[dx] = {}\n for i, thresh in enumerate(stats['interval']):\n mean_pck[dx][thresh] = []\n for kx, kp_name in enumerate(kp_names):\n mean_pck[dx][thresh].append(\n stats['eval_params'][dx]['acc'][kx, i]\n )\n\n mean_pck[dx] = {\n k: np.round(np.mean(np.array(t)), 4)\n for k, t in mean_pck[dx].items()\n }\n\n # pdb.set_trace()\n print('***** Distance Thresholds ***** ')\n pprint.pprint('Mean PCK Acc')\n pprint.pprint(mean_pck)\n # pprint.pprint(points_per_kp)\n\n stats = collate_all_instances(\n kp_eval_thresholds, kp_names, bench_stats, img_size\n )\n pr_stats = evaluate_pr.inst_bench_evaluate(\n stats['pred_label'], stats['gt_label'], stats['score']\n )\n pr_mat_file = osp.join(results_dir, 'pr_{}.mat'.format(n_iter))\n\n sio.savemat(pr_mat_file, pr_stats)\n return stats_m1\n", "\"\"\"\nCode borrowed from https://github.com/shubhtuls/toe/blob/master/data/base.py\nBase data loading class.\nShould output:\n - img: B X 3 X H X W\n - mask: B X H X W\n - kp (optional): B X nKp X 2\n - sfm_pose (optional): B X 7 (s, tr, q)\n (kp, sfm_pose) correspond to image coordinates in [-1, 1]\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport numpy as np\n\nimport imageio\nimport scipy.linalg\nimport scipy.ndimage.interpolation\nfrom absl import flags, app\nfrom skimage import measure\nimport cv2\nfrom scipy import ndimage\nfrom skimage import measure\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataloader 
import default_collate\nimport pdb\nfrom ..utils import image as image_utils\nfrom ..utils import transformations\n\nflags.DEFINE_boolean('dl_shuffle_inds', False, 'Shuffle inds')\nflags.DEFINE_integer('img_size', 256, 'image size')\nflags.DEFINE_enum(\n 'split', 'train', ['train', 'val', 'all', 'test'], 'eval split'\n)\nflags.DEFINE_float(\n 'padding_frac', 0.05, 'bbox is increased by this fraction of max_dim'\n)\nflags.DEFINE_float(\n 'jitter_frac', 0.05, 'bbox is jittered by this fraction of max_dim'\n)\n\nflags.DEFINE_boolean('tight_crop', False, 'Use Tight crops')\nflags.DEFINE_boolean('flip_train', False, 'Mirror Images while training')\nflags.DEFINE_integer('n_contour', 1000, 'N random samples from the contours')\nflags.DEFINE_boolean(\n 'dl_out_pascal', True, 'Use pascal (implies use keypoints)'\n)\nflags.DEFINE_boolean('dl_out_imnet', True, 'Use iment')\nflags.DEFINE_string('pascal_class', 'horse', 'PASCAL VOC category name/ Cub')\nflags.DEFINE_integer('num_kps', 12, 'Number of keypoints')\n\n\n# -------------- Dataset ------------- #\n# ------------------------------------ #\nclass BaseDataset(Dataset):\n ''' \n img, mask, kp, pose data loader\n '''\n def __init__(self, opts, filter_key=None):\n # Child class should define/load:\n # self.img_dir\n # self.anno\n # self.kp_perm (optional)\n # self.anno_sfm (optional)\n self._out_kp = False\n self._out_pose = False\n self._shuffle_inds = opts.dl_shuffle_inds\n if self._shuffle_inds:\n self._index_perm = None\n\n self.opts = opts\n self.img_size = opts.img_size\n self.jitter_frac = opts.jitter_frac\n self.padding_frac = opts.padding_frac\n self.filter_key = filter_key\n self.n_contour = opts.n_contour\n\n def normalize_kp(self, kp, img_h, img_w):\n vis = kp[:, 2, None] > 0\n new_kp = np.stack(\n [2 * (kp[:, 0] / img_w) - 1, 2 * (kp[:, 1] / img_h) - 1, kp[:, 2]]\n ).T\n new_kp = vis * new_kp\n\n return new_kp\n\n def normalize_pose(self, sfm_pose, img_h, img_w):\n sfm_pose[0] *= (1.0 / img_w + 1.0 / img_h)\n sfm_pose[1][0] = 2.0 * (sfm_pose[1][0] / img_w) - 1\n sfm_pose[1][1] = 2.0 * (sfm_pose[1][1] / img_h) - 1\n return sfm_pose\n\n def crop_image(self, img, mask, bbox, kp=None, sfm_pose=None):\n # crop image and mask and translate kps\n img = image_utils.crop(img, bbox, bgval=1)\n mask = image_utils.crop(mask, bbox, bgval=0)\n if (kp is not None):\n vis = kp[:, 2] > 0\n kp[vis, 0] -= bbox[0]\n kp[vis, 1] -= bbox[1]\n if sfm_pose is not None:\n sfm_pose[1][0] -= bbox[0]\n sfm_pose[1][1] -= bbox[1]\n return img, mask, kp, sfm_pose\n\n def scale_image(self, img, mask, kp=None, sfm_pose=None):\n # Scale image so largest bbox size is img_size\n bwidth = np.shape(img)[0]\n bheight = np.shape(img)[1]\n scale = self.img_size / float(max(bwidth, bheight))\n img_scale, _ = image_utils.resize_img(img, scale)\n\n mask_scale, _ = image_utils.resize_img(mask, scale)\n if kp is not None:\n vis = kp[:, 2] > 0\n kp[vis, :2] *= scale\n if sfm_pose is not None:\n sfm_pose[0] *= scale\n sfm_pose[1] *= scale\n\n return img_scale, mask_scale, kp, sfm_pose\n\n def mirror_image(self, img, mask, kp=None, sfm_pose=None):\n if np.random.rand(1) > 0.5:\n # Need copy bc torch collate doesnt like neg strides\n img_flip = img[:, ::-1, :].copy()\n mask_flip = mask[:, ::-1].copy()\n\n if kp is not None:\n kp_perm = self.kp_perm\n # Flip kps.\n new_x = img.shape[1] - kp[:, 0] - 1\n kp_flip = np.hstack((new_x[:, None], kp[:, 1:]))\n if kp_perm is not None:\n kp_flip = kp_flip[kp_perm, :]\n kp = kp_flip\n\n if sfm_pose is not None:\n # Flip sfm_pose Rot.\n R = 
transformations.quaternion_matrix(sfm_pose[2])\n flip_R = np.diag([-1, 1, 1,\n 1]).dot(R.dot(np.diag([-1, 1, 1, 1])))\n sfm_pose[2] = transformations.quaternion_from_matrix(\n flip_R, isprecise=True\n )\n # Flip tx\n tx = img.shape[1] - sfm_pose[1][0] - 1\n sfm_pose[1][0] = tx\n\n return img_flip, mask_flip, kp, sfm_pose\n else:\n return img, mask, kp, sfm_pose\n\n def __len__(self):\n return self.num_imgs\n\n def forward_img(self, index):\n data = self.anno[index]\n\n img_path = osp.join(self.img_dir, str(data.rel_path))\n img = imageio.imread(img_path) / 255.0\n # Some are grayscale:\n if len(img.shape) == 2:\n img = np.repeat(np.expand_dims(img, 2), 3, axis=2)\n mask = np.expand_dims(data.mask, 2)\n\n # Adjust to 0 indexing\n bbox = np.array(\n [data.bbox.x1, data.bbox.y1, data.bbox.x2, data.bbox.y2], float\n ) - 1\n\n if self._out_pose:\n data_sfm = self.anno_sfm[index]\n # sfm_pose = (sfm_c, sfm_t, sfm_r)\n sfm_pose = [\n np.copy(data_sfm.scale),\n np.copy(data_sfm.trans),\n np.copy(data_sfm.rot)\n ]\n sfm_rot = np.pad(sfm_pose[2], (0, 1), 'constant')\n sfm_rot[3, 3] = 1\n sfm_pose[2] = transformations.quaternion_from_matrix(\n sfm_rot, isprecise=True\n )\n else:\n sfm_pose = None\n\n if self._out_kp:\n parts = data.parts.T.astype(float)\n kp = np.copy(parts)\n vis = kp[:, 2] > 0\n # 0 indexed from 1 indexed\n kp[vis, :2] -= 1\n kp[np.logical_not(vis), :2] = 0\n else:\n kp = None\n\n # print(kp.shape)\n # if len(kp) == 16:\n # pdb.set_trace()\n\n # Peturb bbox\n if self.opts.split == 'train':\n bbox = image_utils.peturb_bbox(\n bbox, pf=self.padding_frac, jf=self.jitter_frac\n )\n else:\n bbox = image_utils.peturb_bbox(bbox, pf=self.padding_frac, jf=0)\n bbox = image_utils.square_bbox(bbox)\n\n # crop image around bbox, translate kps\n img, mask, kp, sfm_pose = self.crop_image(\n img, mask, bbox, kp=kp, sfm_pose=sfm_pose\n )\n\n # scale image, and mask. 
And scale kps.\n img, mask, kp, sfm_pose = self.scale_image(\n img, mask, kp=kp, sfm_pose=sfm_pose\n )\n\n # Mirror image on random.\n if self.opts.split == 'train':\n img, mask, kp, sfm_pose = self.mirror_image(\n img, mask, kp=kp, sfm_pose=sfm_pose\n )\n # Normalize kp to be [-1, 1]\n img_h, img_w = img.shape[:2]\n if self._out_kp:\n kp = self.normalize_kp(kp, img_h, img_w)\n if self._out_pose:\n sfm_pose = self.normalize_pose(sfm_pose, img_h, img_w)\n\n # Finally transpose the image to 3xHxW\n img = np.transpose(img, (2, 0, 1))\n\n return img, mask, kp, sfm_pose\n\n def _filter(self, elem):\n if self.filter_key is not None:\n if self.filter_key not in elem.keys():\n print('Bad filter key %s' % self.filter_key)\n import ipdb\n ipdb.set_trace()\n if self.filter_key == 'sfm_pose':\n # Return both vis and sfm_pose\n vis = elem['kp'][:, 2]\n elem = {\n 'vis': vis,\n 'sfm_pose': elem['sfm_pose'],\n }\n else:\n elem = elem[self.filter_key]\n return elem\n\n def _sample_contour(\n self,\n mask,\n ):\n # indices_y, indices_x = np.where(mask)\n # npoints = len(indices_y)\n contour = measure.find_contours(mask, 0)\n contour = np.concatenate(contour)\n sample_size = self.n_contour\n\n def offset_and_clip_contour(contour, offset, img_size):\n contour = contour + offset\n contour = np.clip(contour, a_min=0, a_max=img_size - 1)\n return contour\n\n offsets = np.array(\n [\n [0, 0],\n [0, 1],\n [0, 2],\n [0, -1],\n [0, -2],\n [1, 0],\n [2, 0],\n [-1, 0],\n [-2, 0],\n [-1, -1],\n [-2, -2],\n [1, 1],\n [2, 2],\n [-1, 1],\n [-2, 2],\n [1, -1],\n [2, -2],\n ]\n )\n\n new_contours = []\n for offset in offsets:\n temp_contour = offset_and_clip_contour(\n contour, offset.reshape(-1, 2), self.img_size\n )\n new_contours.append(temp_contour)\n\n new_contours = np.concatenate(new_contours)\n # contour_mask = mask * 0\n # new_contours = new_contours.astype(np.int)\n # contour_mask[new_contours[:,0], new_contours[:,1]] = 1\n npoints = len(new_contours)\n sample_indices = np.random.choice(\n range(npoints), size=sample_size, replace=False\n )\n\n # swtich x any y.\n\n temp = np.stack(\n [new_contours[sample_indices, 1], new_contours[sample_indices, 0]],\n axis=1\n )\n temp = temp.copy()\n return temp\n\n def mask_truncated_df(self, mask):\n mask_df = ndimage.distance_transform_edt(1 - mask)\n return mask_df\n\n # def _sample_contour(self, mask, n_samples=1000):\n # contour = measure.find_contours(mask, 0)\n # contour = np.concatenate(contour)\n # sample_indices = np.random.choice(\n # range(contour.shape[0]), size=n_samples, replace=True\n # )\n # # swtich x any y.\n # samples = np.stack(\n # [contour[sample_indices, 1], contour[sample_indices, 0]], axis=1\n # )\n # return 2 * (samples / mask.shape[0] - 0.5)\n\n def __getitem__(self, index):\n if self._shuffle_inds:\n if self._index_perm is None:\n self._index_perm = np.random.RandomState(seed=0).permutation(\n self.num_imgs\n )\n index = self._index_perm[index]\n\n img, mask, kp, _ = self.forward_img(index)\n\n mask_df = self.mask_truncated_df(mask)\n contour = self._sample_contour(mask)\n valid = True\n if len(contour) != self.n_contour:\n valid = False\n\n elem = {\n 'valid': valid,\n 'img': img.astype(np.float32),\n 'mask': mask.astype(np.float32),\n 'inds': index,\n 'mask_df': mask_df.astype(np.float32),\n 'contour': contour.astype(np.float32)\n }\n if kp is not None:\n elem['kp'] = kp.astype(np.float32)\n\n if self.opts.flip_train:\n flip_img = img[:, :, ::-1].copy()\n elem['flip_img'] = flip_img\n flip_mask = mask[:, ::-1].copy()\n elem['flip_mask'] = 
flip_mask\n elem['flip_mask_df'] = self.mask_truncated_df(flip_mask)\n elem['flip_contour'] = self._sample_contour(flip_mask)\n\n return self._filter(elem)\n\n\n# --------- Kp + Cam Dataset --------- #\n# ------------------------------------ #\nclass BaseKpCamDataset(BaseDataset):\n ''' \n img, mask, kp, pose data loader\n '''\n def __init__(self, opts, filter_key=None):\n super(BaseKpCamDataset, self).__init__(opts, filter_key=filter_key)\n self._out_kp = True\n self._out_pose = True\n\n\n# ------------ Data Loader ----------- #\n# ------------------------------------ #\ndef base_loader(\n d_set_func,\n batch_size,\n opts,\n filter_key=None,\n shuffle=True,\n pascal_only=False\n):\n dset = d_set_func(opts, filter_key=filter_key, pascal_only=pascal_only)\n return DataLoader(\n dset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=opts.n_data_workers,\n drop_last=True,\n pin_memory=True,\n collate_fn=collate_fn,\n )\n\n\ndef collate_fn(batch):\n '''Globe data collater.\n\n Assumes each instance is a dict.\n Applies different collation rules for each field.\n\n Args:\n batch: List of loaded elements via Dataset.__getitem__\n '''\n collated_batch = {'empty': True}\n # iterate over keys\n new_batch = []\n for t in batch:\n if t['valid']:\n new_batch.append(t)\n else:\n 'Print, found an invalid batch'\n\n # batch = [t for t in batch if t is not None]\n batch = new_batch\n if len(batch) > 0:\n for key in batch[0]:\n collated_batch[key] = default_collate([elem[key] for elem in batch])\n collated_batch['empty'] = False\n return collated_batch" ]
[ [ "numpy.nanmedian", "numpy.isnan", "numpy.stack", "numpy.concatenate", "numpy.nanmean", "scipy.io.savemat", "numpy.nanstd", "numpy.array", "numpy.where", "numpy.sum" ], [ "numpy.diag", "numpy.hstack", "numpy.logical_not", "numpy.expand_dims", "numpy.pad", "numpy.clip", "torch.utils.data.DataLoader", "scipy.ndimage.distance_transform_edt", "numpy.stack", "numpy.concatenate", "numpy.copy", "numpy.shape", "numpy.random.rand", "numpy.transpose", "numpy.array", "numpy.random.RandomState", "torch.utils.data.dataloader.default_collate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mthrok/ctcdecode
[ "b1a30d7a65342012e0d2524d9bae1c5412b24a23" ]
[ "example/evalutate_wav2vec2/evaluate_wav2vec2_librispeech.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Generate `trn` files for Librispeech\n\nGiven a Librispeech directory, parse transcript files,\ntranscribe the corresponding audio, and generate hypothesis files.\n\"\"\"\nimport os\nimport time\nimport logging\nimport argparse\nfrom pathlib import Path\n\nimport torch\nimport torchaudio\nimport fairseq\nimport simple_ctc\n\n\n_LG = logging.getLogger(__name__)\n\n\ndef _parse_args():\n def _path(path):\n return Path(os.path.normpath(path))\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n )\n parser.add_argument(\n '--root-dir',\n required=True,\n type=_path,\n help='The root directory on which data are persed.'\n )\n parser.add_argument(\n '--output-dir',\n required=True,\n type=_path,\n help='The output directory where trn files are generated.'\n )\n parser.add_argument(\n '--model-file',\n required=True,\n type=_path,\n help='Path to a finetuned weight file.'\n )\n parser.add_argument(\n '--dict-file',\n required=True,\n type=_path,\n help='Path to `dict.ltr.txt` file.'\n )\n parser.add_argument(\n '--num-threads',\n type=int,\n default=4,\n help='Maximum number of threads .'\n )\n\n args = parser.parse_args()\n for path in [args.root_dir, args.output_dir, args.model_file, args.dict_file]:\n if not os.path.exists(path):\n raise RuntimeError(f'File or directory does not exist: {path}')\n return args\n\n\ndef _parse_transcript(path):\n with open(path) as trans_fileobj:\n for line in trans_fileobj:\n line = line.strip()\n if not line:\n continue\n id, transcription = line.split(' ', maxsplit=1)\n yield id, transcription\n\n\ndef _parse_transcriptions(root_dir, output_dir):\n _LG.info('Parsing transcriptions')\n audios = []\n trn = output_dir / 'ref.trn'\n txt = output_dir / 'ref.trans.txt'\n with open(trn, 'w') as trn_fileobj, open(txt, 'w') as txt_fileobj:\n for trans_file in root_dir.glob('**/*.trans.txt'):\n trans_dir = trans_file.parent\n for id, transcription in _parse_transcript(trans_file):\n trn_fileobj.write(f'{transcription} ({id})\\n')\n txt_fileobj.write(f'{id} {transcription}\\n')\n audio_path = trans_dir / f'{id}.flac'\n audios.append((id, audio_path))\n return audios\n\n\ndef _load_vocab(dict_file):\n tokens = [\"<s>\", \"<pad>\", \"</s>\", \"<unk>\"]\n with open(dict_file, mode='r', encoding='utf-8') as fileobj:\n for line in fileobj:\n tokens.append(line.split()[0])\n return tokens\n\n\ndef _count_params(model):\n return sum(p.numel() for p in model.parameters())\n\n\ndef _load_model(model_file, dict_file):\n _LG.info('Loading the model')\n labels = _load_vocab(dict_file)\n\n overrides = {'data': str(dict_file.parent)}\n\n models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(\n [str(model_file)], arg_overrides=overrides\n )\n model = models[0].eval()\n\n encoder = model.w2v_encoder\n\n decoder = simple_ctc.BeamSearchDecoder(\n labels,\n cutoff_top_n=40,\n cutoff_prob=0.8,\n beam_size=100,\n num_processes=1,\n blank_id=0,\n is_nll=True,\n )\n _LG.info('#parameters: %s', _count_params(encoder))\n return encoder, decoder\n\n\ndef _decode(audios, encoder, decoder, output_dir):\n trn = output_dir / 'hyp.trn'\n trans = output_dir / 'hyp.trans.txt'\n t_enc, t_dec, num_frames = 0.0, 0.0, 0\n with open(trn, 'w') as trn_fileobj, open(trans, 'w') as txt_fileobj:\n for i, (id, path) in enumerate(audios):\n waveform, _ = torchaudio.load(path)\n mask = torch.zeros_like(waveform)\n\n t0 = time.monotonic()\n ir = encoder(waveform, mask)['encoder_out'].transpose(1, 0)\n t1 = time.monotonic()\n result = decoder.decode(ir)\n t2 = 
time.monotonic()\n trn = ''.join(result.label_sequences[0][0]).replace('|', ' ')\n trn_fileobj.write(f'{trn} ({id})\\n')\n txt_fileobj.write(f'{id} {trn}\\n')\n _LG.info('%d/%d: %s: %s', i, len(audios), id, trn)\n\n num_frames += waveform.size(1)\n t_enc += t1 - t0\n t_dec += t2 - t1\n t_audio = num_frames / 16000\n _LG.info('Audio duration: %s [sec]', t_audio)\n _LG.info('Encoding Time: %s [sec]', t_enc)\n _LG.info('Decoding Time: %s [sec]', t_dec)\n _LG.info('Total Inference Time: %s [sec]', t_enc + t_dec)\n\n\ndef _main():\n args = _parse_args()\n torch.set_num_threads(args.num_threads)\n logging.basicConfig(\n format='%(asctime)s %(levelname)s: %(message)s',\n level=logging.INFO)\n audios = _parse_transcriptions(args.root_dir, args.output_dir)\n encoder, decoder = _load_model(args.model_file, args.dict_file)\n _decode(audios, encoder, decoder, args.output_dir)\n\n\nif __name__ == '__main__':\n _main()\n" ]
[ [ "torch.zeros_like", "torch.set_num_threads" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
reuvenperetz/model_optimization
[ "40de02d56750ee4cc20e693da63bc2e70b4d20e6", "40de02d56750ee4cc20e693da63bc2e70b4d20e6", "40de02d56750ee4cc20e693da63bc2e70b4d20e6" ]
[ "tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py", "tests/keras_tests/feature_networks_tests/feature_networks/tanh_activation_test.py", "tests/common_tests/function_tests/test_collectors_manipulation.py" ]
[ "# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport operator\nfrom typing import List, Any, Tuple\nimport numpy as np\nimport torch\nfrom torch.nn import Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax, \\\n Sigmoid, Softplus, Softsign, Tanh\nfrom torch.nn.functional import hardswish, hardsigmoid, relu, hardtanh, relu6, leaky_relu, prelu, silu, softmax, \\\n softplus, softsign\nfrom torch.nn import UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d\nfrom torch.nn.functional import upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d\nfrom torch.nn import Conv2d, ConvTranspose2d, Linear, BatchNorm2d\nfrom torch.nn import Dropout, Flatten\nfrom torch import add, multiply, mul, sub, flatten, reshape, split, unsqueeze, concat, cat,\\\n mean, dropout, sigmoid, tanh\nfrom torch.fx import symbolic_trace\nfrom torch.nn import Module\n\nfrom model_compression_toolkit import FrameworkInfo, pytorch_post_training_quantization\nfrom model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation\nfrom model_compression_toolkit.core.tpc_models.default_tp_model import get_default_tp_model\nfrom model_compression_toolkit.core.tpc_models.pytorch_tp_models.pytorch_default import generate_pytorch_tpc\nfrom model_compression_toolkit.core.pytorch.constants import CALL_FUNCTION, OUTPUT, CALL_METHOD, PLACEHOLDER\nfrom model_compression_toolkit.core.pytorch.reader.graph_builders import DummyPlaceHolder, ConstantHolder\nfrom model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy, to_torch_tensor\nfrom tests.common_tests.base_layer_test import BaseLayerTest, LayerTestMode\nfrom model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO\nfrom model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation\nfrom tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model\n\n\nPYTORCH_LAYER_TEST_OPS = {\n \"kernel_ops\": [Conv2d, Linear, ConvTranspose2d],\n\n \"no_quantization\": [Dropout, Flatten, ConstantHolder, dropout, flatten, split, operator.getitem, reshape,\n unsqueeze],\n\n \"activation\": [DummyPlaceHolder,\n Hardswish, Hardsigmoid, ReLU, Hardtanh, ReLU6, LeakyReLU, PReLU, SiLU, Softmax,\n Sigmoid, Softplus, Softsign, Tanh, hardswish, hardsigmoid, relu, hardtanh,\n relu6, leaky_relu, prelu,\n silu, softmax, sigmoid, softplus, softsign, tanh, torch.relu,\n UpsamplingBilinear2d, AdaptiveAvgPool2d, AvgPool2d, MaxPool2d,\n upsample_bilinear, adaptive_avg_pool2d, avg_pool2d, max_pool2d,\n add, sub, mul, multiply,\n operator.add, operator.sub, operator.mul,\n BatchNorm2d, concat, cat, mean]\n}\n\n\nclass LayerTestModel(torch.nn.Module):\n def __init__(self, layer):\n super(LayerTestModel, self).__init__()\n self.layer = layer\n\n def forward(self, x):\n return 
self.layer(x)\n\n\nclass OperationTestModel(torch.nn.Module):\n def __init__(self, layer):\n super(OperationTestModel, self).__init__()\n self.layer = layer\n\n def forward(self, x, y):\n return self.layer(x, y)\n\n\ndef is_node_fake_quant(node):\n return node.target == torch.fake_quantize_per_tensor_affine\n\n\ndef get_node_operation(node, model):\n if hasattr(model, str(node.target)):\n op = getattr(model, node.target)\n elif node.op == CALL_FUNCTION:\n op = node.target\n elif node.op == CALL_METHOD:\n op = getattr(torch, node.target)\n elif node.op == PLACEHOLDER:\n op = DummyPlaceHolder\n elif node.op == OUTPUT:\n op = OUTPUT\n else:\n op = None\n return op\n\n\ndef get_layer_weights(layer):\n # extract layer weights and named buffers\n weights = {}\n named_parameters_weights = {name: torch_tensor_to_numpy(parameter) for name, parameter in\n layer.named_parameters()}\n named_buffer_weights = {name: torch_tensor_to_numpy(parameter) for name, parameter in\n layer.named_buffers() if len(parameter.shape) > 0}\n weights.update(named_parameters_weights)\n weights.update(named_buffer_weights)\n return weights\n\n\nclass BasePytorchLayerTest(BaseLayerTest):\n def __init__(self,\n unit_test,\n layers: List[Any],\n val_batch_size: int = 1,\n num_calibration_iter: int = 1,\n num_of_inputs: int = 1,\n input_shape: Tuple[int, int, int] = (3, 8, 8),\n quantization_modes: List[LayerTestMode] = [LayerTestMode.FLOAT, LayerTestMode.QUANTIZED_8_BITS],\n is_inputs_a_list: bool = False,\n use_cpu: bool = False):\n\n super().__init__(unit_test=unit_test,\n layers=layers,\n val_batch_size=val_batch_size,\n num_calibration_iter=num_calibration_iter,\n num_of_inputs=num_of_inputs,\n input_shape=input_shape,\n quantization_modes=quantization_modes,\n is_inputs_a_list=is_inputs_a_list,\n use_cpu=use_cpu)\n\n def get_tpc(self):\n if self.current_mode == LayerTestMode.FLOAT:\n # Disable all features that are enabled by default:\n tp = generate_test_tp_model({'enable_weights_quantization': False,\n 'enable_activation_quantization': False})\n return generate_pytorch_tpc(name=\"base_layer_test\", tp_model=tp)\n elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:\n tp = generate_test_tp_model({'weights_n_bits': 8,\n 'activation_n_bits': 8})\n return generate_pytorch_tpc(name=\"8bit_layer_test\", tp_model=tp)\n else:\n raise NotImplemented\n\n def get_fw_info(self) -> FrameworkInfo:\n return DEFAULT_PYTORCH_INFO\n\n def get_fw_impl(self) -> FrameworkImplementation:\n return PytorchImplementation()\n\n def get_ptq_facade(self):\n return pytorch_post_training_quantization\n\n def generate_inputs(self):\n return to_torch_tensor([torch.randn(*in_shape) for in_shape in self.get_input_shapes()])\n\n def create_networks(self):\n models = []\n for layer in self.get_layers():\n if self.num_of_inputs > 1:\n models.append(OperationTestModel(layer))\n else:\n models.append(LayerTestModel(layer))\n return models\n\n\n def compare(self, quantized_model: Module, float_model: Module, input_x=None, quantization_info=None):\n quantized_model_fx = symbolic_trace(quantized_model)\n # Assert things that should happen when using FLOAT quantization mode\n if self.current_mode == LayerTestMode.FLOAT:\n self.__compare_float_mode(float_model, quantized_model, quantized_model_fx)\n\n # Assert things that should happen when using QUANTIZED_8_BITS quantization mode\n elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:\n self.__compare_8bits_quantization_mode(float_model, quantized_model, quantized_model_fx)\n\n # Check inference is 
possible\n input_tensors = self.generate_inputs()\n quantized_model(*input_tensors)\n quantized_model_fx(*input_tensors)\n\n def __compare_8bits_quantization_mode(self, float_model, quantized_model, quantized_model_fx):\n fw_info = self.get_fw_info()\n for node in quantized_model_fx.graph.nodes:\n op = get_node_operation(node, quantized_model)\n if op == OUTPUT or op == operator.getitem or is_node_fake_quant(node):\n continue\n if hasattr(quantized_model, str(node.target)):\n if type(op) in PYTORCH_LAYER_TEST_OPS['kernel_ops']:\n quantized_weights = get_layer_weights(getattr(quantized_model, node.target))\n float_weights = get_layer_weights(getattr(float_model, node.target))\n for k, v in quantized_weights.items():\n if k in fw_info.kernel_ops_attributes_mapping.get(type(op)):\n float_weight = float_weights.get(k)\n self.unit_test.assertFalse(float_weight is None)\n self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) > 0.0)\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertTrue(is_node_fake_quant(node_next))\n\n elif op in PYTORCH_LAYER_TEST_OPS['activation']:\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertTrue(is_node_fake_quant(node_next))\n\n elif op in PYTORCH_LAYER_TEST_OPS['no_quantization']:\n node_next = node.next\n while get_node_operation(node_next, quantized_model) == operator.getitem:\n node_next = node_next.next\n self.unit_test.assertFalse(is_node_fake_quant(node_next))\n else:\n raise Exception(f'Layer {op} is not in framework info')\n\n def __compare_float_mode(self, float_model, quantized_model, quantized_model_fx):\n for node in quantized_model_fx.graph.nodes:\n # Check there are no fake-quant layers\n self.unit_test.assertFalse(is_node_fake_quant(node))\n # check unchanged weights\n if hasattr(quantized_model, str(node.target)):\n quantized_weights = get_layer_weights(getattr(quantized_model, node.name))\n float_weights = get_layer_weights(getattr(float_model, node.name))\n for k, v in quantized_weights.items():\n float_weight = float_weights.get(k)\n self.unit_test.assertFalse(float_weight is None)\n self.unit_test.assertTrue(np.sum(np.abs(v - float_weight)) == 0.0)\n input_tensors = self.generate_inputs()\n y = float_model(*input_tensors)\n y_hat = quantized_model(*input_tensors)\n if isinstance(y, (list, tuple)):\n for fo, qo in zip(y, y_hat):\n distance = torch_tensor_to_numpy(torch.sum(torch.abs(fo - qo)))\n self.unit_test.assertTrue(distance == 0,\n msg=f'Outputs should be identical. Observed distance: {distance}')\n\n else:\n distance = torch_tensor_to_numpy(torch.sum(torch.abs(y - y_hat)))\n self.unit_test.assertTrue(distance == 0,\n msg=f'Outputs should be identical. Observed distance: {distance}')\n", "# Copyright 2021 Sony Semiconductors Israel, Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport model_compression_toolkit.core.target_platform.op_quantization_config\nfrom tests.common_tests.base_feature_test import BaseFeatureNetworkTest\nimport model_compression_toolkit as mct\nimport tensorflow as tf\nfrom tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest\nimport numpy as np\nfrom tests.common_tests.helpers.tensors_compare import cosine_similarity\n\nkeras = tf.keras\nlayers = keras.layers\n\n\nclass TanhActivationTest(BaseKerasFeatureNetworkTest):\n def __init__(self, unit_test):\n super().__init__(unit_test)\n\n def get_quantization_config(self):\n return mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE, mct.QuantizationErrorMethod.MSE,\n model_compression_toolkit.target_platform.QuantizationMethod.POWER_OF_TWO,\n model_compression_toolkit.target_platform.op_quantization_config.QuantizationMethod.POWER_OF_TWO,\n 16, 16,\n True, True, True)\n\n\n def create_networks(self):\n inputs = layers.Input(shape=self.get_input_shapes()[0][1:])\n x = layers.Conv2D(3, 4)(inputs)\n x = layers.BatchNormalization()(x)\n outputs = layers.Activation('tanh')(x)\n return keras.Model(inputs=inputs, outputs=outputs)\n\n def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):\n y = float_model.predict(input_x)\n y_hat = quantized_model.predict(input_x)\n cs = cosine_similarity(y, y_hat)\n self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')\n", "# Copyright 2021 Sony Semiconductors Israel, Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\nimport unittest\nimport numpy as np\nfrom model_compression_toolkit.core.common.collectors.statistics_collector import StatsCollector\nfrom model_compression_toolkit.core.common.collectors.statistics_collector import scale_statistics\nfrom model_compression_toolkit.core.common.collectors.statistics_collector import shift_statistics\nfrom model_compression_toolkit.core.common.framework_info import ChannelAxis\n\n\ndef init_stats_container(num_of_input_channels, init_min=None, init_max=None):\n sc = StatsCollector(init_min_value=init_min, init_max_value=init_max, output_channel_index=ChannelAxis.NHWC)\n x = np.random.rand(1, 2, 3, num_of_input_channels)\n for i in range(100):\n sc.update_statistics(x)\n return sc\n\n\ndef scale_stats_container(sc, num_of_scaling_factors):\n scaling_factor = np.random.random(num_of_scaling_factors)\n scaled_sc = scale_statistics(sc, scaling_factor)\n return scaled_sc, scaling_factor\n\n\ndef shift_stats_container(sc, num_of_shifting_factors):\n shifting_factor = np.random.random(num_of_shifting_factors)\n shifted_sc = shift_statistics(sc, shifting_factor)\n return shifted_sc, shifting_factor\n\n\nclass TestCollectorsManipulations(unittest.TestCase):\n\n ########### Test scaling ###########\n def test_mean_scale_per_channel(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors)\n mean = sc.get_mean()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n scaled_mean = scaled_sc.get_mean()\n self.assertTrue(np.allclose(scaled_mean / scaling_factor, mean))\n\n def test_mean_scale_per_tensor(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors)\n mean = sc.get_mean()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n scaled_mean = scaled_sc.get_mean()\n self.assertTrue(np.allclose(scaled_mean / scaling_factor, mean))\n\n def test_histogram_scale_per_channel(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors)\n bins, _ = sc.hc.get_histogram()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n with self.assertRaises(Exception):\n scaled_sc.hc.get_histogram() # data is corrupted. 
expect exception\n\n def test_histogram_scale_per_tensor(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors)\n bins, _ = sc.hc.get_histogram()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n scaled_bins, _ = scaled_sc.hc.get_histogram()\n self.assertTrue(np.allclose(scaled_bins / scaling_factor, bins))\n\n def test_min_max_scale_per_channel(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_scaled, max_pc_scaled = scaled_sc.mpcc.min_per_channel, scaled_sc.mpcc.max_per_channel\n self.assertTrue(np.allclose(min_pc_scaled / scaling_factor, min_pc))\n self.assertTrue(np.allclose(max_pc_scaled / scaling_factor, max_pc))\n\n def test_min_max_scale_per_tensor(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors)\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_scaling_factors)\n self.assertTrue(np.allclose(scaled_sc.get_min_max_values() / scaling_factor, sc.get_min_max_values()))\n\n ########### Test shifting ############\n def test_mean_shift_per_channel(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors)\n mean = sc.get_mean()\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n shifted_mean = shifted_sc.get_mean()\n self.assertTrue(np.allclose(shifted_mean - shifting_factor, mean))\n\n def test_mean_shift_per_tensor(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors)\n mean = sc.get_mean()\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n shifted_mean = shifted_sc.get_mean()\n self.assertTrue(np.allclose(shifted_mean - shifting_factor, mean))\n\n def test_histogram_shift_per_channel(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors)\n bins, _ = sc.hc.get_histogram()\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n with self.assertRaises(Exception):\n shifted_sc.hc.get_histogram() # data is corrupted. 
expect exception\n\n def test_histogram_shift_per_tensor(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors)\n bins, _ = sc.hc.get_histogram()\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n shifted_bins, _ = shifted_sc.hc.get_histogram()\n self.assertTrue(np.allclose(shifted_bins - shifting_factor, bins))\n\n def test_min_max_shift_per_channel(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_shifted, max_pc_shifted = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n self.assertTrue(np.allclose(min_pc_shifted - shifting_factor, min_pc))\n self.assertTrue(np.allclose(max_pc_shifted - shifting_factor, max_pc))\n\n def test_min_max_shift_per_tensor(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors)\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n self.assertTrue(np.allclose(shifted_sc.get_min_max_values() - shifting_factor, sc.get_min_max_values()))\n\n ########### Test scaling -> shifting (same granularity) ###########\n def test_mean_scale_shift_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n mean = sc.get_mean()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_mean_scale_shift_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n mean = sc.get_mean()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_histogram_scale_shift_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n bins, _ = sc.hc.get_histogram()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_histogram_scale_shift_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n bins, _ = sc.hc.get_histogram()\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_bins, _ = final_sc.hc.get_histogram()\n restored_bins = (final_bins - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_bins, bins))\n\n def test_minmax_scale_shift_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, 
final_sc.mpcc.max_per_channel\n restored_min_pc = (min_pc_final - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_scale_shift_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n restored_min_pc = (min_pc_final - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n ########### Test scaling -> shifting -> scaling (same granularity) ###########\n def test_mean_scale_shift_scale_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n mean = sc.get_mean()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean / scaling_factor2 - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_mean_scale_shift_scale_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n mean = sc.get_mean()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean / scaling_factor2 - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_histogram_scale_shift_scale_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n bins, _ = sc.hc.get_histogram()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_histogram_scale_shift_scale_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n bins, _ = sc.hc.get_histogram()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n final_bins, _ = final_sc.hc.get_histogram()\n restored_bins = (final_bins / scaling_factor2 - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_bins, bins))\n\n def test_minmax_scale_shift_scale_per_channel(self, num_of_input_channels=10):\n sc = init_stats_container(num_of_input_channels)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n scaled_sc, 
scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n restored_min_pc = (min_pc_final / scaling_factor2 - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final / scaling_factor2 - shifting_factor) / scaling_factor\n\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_scale_shift_scale_per_tensor(self, num_of_input_channels=1):\n sc = init_stats_container(num_of_input_channels)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_of_input_channels)\n shifted_sc, shifting_factor = shift_stats_container(scaled_sc, num_of_input_channels)\n final_sc, scaling_factor2 = scale_stats_container(shifted_sc, num_of_input_channels)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n restored_min_pc = (min_pc_final / scaling_factor2 - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final / scaling_factor2 - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n ########### Test scaling -> shifting (different granularity) ###########\n def test_mean_scale_per_channel_shift_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n mean = sc.get_mean()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_mean_scale_per_tensor_shift_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n mean = sc.get_mean()\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n final_mean = final_sc.get_mean()\n restored_mean = (final_mean - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_histogram_scale_per_channel_shift_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_histogram_scale_per_tensor_shift_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_minmax_scale_per_channel_shift_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n 
min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n\n restored_min_pc = (min_pc_final - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_scale_per_tensor_shift_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n scaled_sc, scaling_factor = scale_stats_container(sc, num_scale_factors)\n final_sc, shifting_factor = shift_stats_container(scaled_sc, num_shift_factors)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n\n restored_min_pc = (min_pc_final - shifting_factor) / scaling_factor\n restored_max_pc = (max_pc_final - shifting_factor) / scaling_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n ########### Test shifting -> scaling (different granularity) ###########\n\n def test_mean_shift_per_channel_scale_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n mean = sc.get_mean()\n\n shifted_sc, shifting_factor = shift_stats_container(sc, num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n final_mean = final_sc.get_mean()\n restored_mean = final_mean / scaling_factor - shifting_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_mean_shift_per_tensor_scale_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n mean = sc.get_mean()\n\n shifted_sc, shifting_factor = shift_stats_container(sc, num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n final_mean = final_sc.get_mean()\n restored_mean = final_mean / scaling_factor - shifting_factor\n self.assertTrue(np.allclose(restored_mean, mean))\n\n def test_histogram_shift_per_channel_scale_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n\n shifted_sc, shifting_factor = shift_stats_container(sc, num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_histogram_shift_per_tensor_scale_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n\n shifted_sc, shifting_factor = shift_stats_container(sc, num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n with self.assertRaises(Exception):\n final_sc.hc.get_histogram()\n\n def test_minmax_shift_per_channel_scale_per_tensor(self, num_scale_factors=10, num_shift_factors=1):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n shifted_sc, shifting_factor = shift_stats_container(sc, 
num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n\n restored_min_pc = min_pc_final / scaling_factor - shifting_factor\n restored_max_pc = max_pc_final / scaling_factor - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_shift_per_tensor_scale_per_channel(self, num_scale_factors=1, num_shift_factors=10):\n sc = init_stats_container(max(num_scale_factors, num_shift_factors))\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n\n shifted_sc, shifting_factor = shift_stats_container(sc, num_shift_factors)\n final_sc, scaling_factor = scale_stats_container(shifted_sc, num_scale_factors)\n\n min_pc_final, max_pc_final = final_sc.mpcc.min_per_channel, final_sc.mpcc.max_per_channel\n\n restored_min_pc = min_pc_final / scaling_factor - shifting_factor\n restored_max_pc = max_pc_final / scaling_factor - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n ########### Test shifting for collector with init values ###########\n def test_minmax_shift_per_channel_init_min(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors, init_min=0)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n self.assertTrue(np.allclose(min_final, np.min(min_pc + shifting_factor)))\n self.assertTrue(np.allclose(max_final, np.max(max_pc + shifting_factor)))\n\n def test_minmax_shift_per_channel_init_max(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n # init values should be ignored as stats were shifted per-channel\n self.assertTrue(np.allclose(max_final, np.max(max_pc + shifting_factor)))\n self.assertTrue(np.allclose(min_final, np.min(min_pc + shifting_factor)))\n\n def test_minmax_shift_per_tensor_init_min(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors, init_min=0)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n 
self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_shift_per_tensor_init_max(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_shift_per_channel_init_minmax(self, num_of_shifting_factors=10):\n sc = init_stats_container(num_of_shifting_factors, init_min=0, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n # init values should be ignored as stats were shifted per-channel\n self.assertTrue(np.allclose(min_final, np.min(min_pc + shifting_factor)))\n self.assertTrue(np.allclose(max_final, np.max(max_pc + shifting_factor)))\n\n def test_minmax_shift_per_tensor_init_minmax(self, num_of_shifting_factors=1):\n sc = init_stats_container(num_of_shifting_factors, init_min=0, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = shift_stats_container(sc, num_of_shifting_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final - shifting_factor\n restored_max_pc = max_pc_final - shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n ########### Test scaling for collector with init values ###########\n def test_minmax_scale_per_channel_init_min(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors, init_min=0)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n self.assertTrue(np.allclose(min_final, np.min(min_pc * shifting_factor)))\n self.assertTrue(np.allclose(max_final, np.max(max_pc * shifting_factor)))\n\n def test_minmax_scale_per_channel_init_max(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / 
shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n self.assertTrue(np.allclose(max_final, np.max(max_pc * shifting_factor)))\n self.assertTrue(np.allclose(min_final, np.min(min_pc * shifting_factor)))\n\n def test_minmax_scale_per_tensor_init_min(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors, init_min=0)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_scale_per_tensor_init_max(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n def test_minmax_scale_per_channel_init_minmax(self, num_of_scaling_factors=10):\n sc = init_stats_container(num_of_scaling_factors, init_min=0, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n min_final, max_final = shifted_sc.get_min_max_values()\n self.assertTrue(np.allclose(min_final, np.min(min_pc * shifting_factor)))\n self.assertTrue(np.allclose(max_final, np.max(max_pc * shifting_factor)))\n\n def test_minmax_scale_per_tensor_init_minmax(self, num_of_scaling_factors=1):\n sc = init_stats_container(num_of_scaling_factors, init_min=0, init_max=99)\n min_pc, max_pc = sc.mpcc.min_per_channel, sc.mpcc.max_per_channel\n shifted_sc, shifting_factor = scale_stats_container(sc, num_of_scaling_factors)\n min_pc_final, max_pc_final = shifted_sc.mpcc.min_per_channel, shifted_sc.mpcc.max_per_channel\n restored_min_pc = min_pc_final / shifting_factor\n restored_max_pc = max_pc_final / shifting_factor\n self.assertTrue(np.allclose(restored_min_pc, min_pc))\n self.assertTrue(np.allclose(restored_max_pc, max_pc))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.fx.symbolic_trace", "torch.abs", "numpy.abs", "torch.randn" ], [ "numpy.isclose" ], [ "numpy.random.random", "numpy.allclose", "numpy.min", "numpy.max", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
myu-wp/coveragecalc
[ "e2fac3baba3f240c8d776f7d28331899333a05c2" ]
[ "coveragecalc/fields.py" ]
[ "import numpy as np\n\n\nOUTPUTS = [\n 'primary phone is valid',\n 'primary phone to name',\n 'primary phone to address',\n 'primary phone line type',\n 'primary phone is prepaid',\n 'primary phone is commercial',\n 'primary address is valid',\n 'primary address diagnostic',\n 'primary address to name',\n 'primary address type',\n 'primary address is active',\n 'primary address is commercial',\n 'primary address is forwarder',\n 'secondary phone is valid',\n 'secondary phone to name',\n 'secondary phone to address',\n 'secondary phone line type',\n 'secondary phone is prepaid',\n 'secondary phone is commercial',\n 'secondary address is valid',\n 'secondary address diagnostic',\n 'secondary address to name',\n 'secondary address type',\n 'secondary address is active',\n 'secondary address is commercial',\n 'secondary address is forwarder',\n 'email is valid',\n 'email is disposable',\n 'email is auto-generated',\n 'email to name',\n 'email first seen days binned',\n 'ip is valid',\n 'ip distance from address binned',\n 'ip distance from phone binned',\n 'ip is proxy',\n 'ip connection type',\n 'confidence score binned',\n]\n\nBINS = {\n 'email first seen days': {\n 'labels': ['Never', '< 3 months', '3 months to a year', '1-4 years', '5+ years'],\n 'bins': [0, 1, 180, 365, 1825, np.inf],\n },\n 'ip distance from address': {\n 'labels': ['0-9', '10-99', '100-999', '1000+'],\n 'bins': [0, 10, 100, 1000, np.inf],\n },\n 'ip distance from phone': {\n 'labels': ['0-9', '10-99', '100-999', '1000+'],\n 'bins': [0, 10, 100, 1000, np.inf],\n },\n 'confidence score': {\n 'bins': np.arange(0,525,25),\n 'labels': ['0-25', '25-50', '50-75', '75-100', '100-125', '125-150', \n '150-175', '175-200', '200-225', '225-250', '250-275',\n '275-300', '300-325', '325-350', '350-375', '375-400',\n '400-425', '425-450', '450-475', '475-500',],\n },\n}\n\nto_name = [\n 'Match',\n 'No match',\n 'No name found',\n]\nto_address = [\n 'Match',\n 'Zip+4 match',\n 'Postal match',\n 'City/State match',\n 'No match',\n]\nline_type = [\n 'Mobile',\n 'Landline',\n 'Fixed VOIP',\n 'Non-fixed VOIP',\n 'Premium',\n 'Tollfree',\n 'Voicemail',\n 'Other',\n 'Unknown',\n]\naddress_type = [\n 'Commercial mail drop',\n 'Multi unit',\n 'Single unit',\n 'PO box',\n 'PO box throwback',\n 'Unknown address type',\n]\naddress_diagnostic = [\n 'Validated',\n 'Validated with corrections',\n 'Validated only Street, Postcode, City, Country. Premise not validated',\n 'Validated only Postcode, City, Country',\n 'Validated only City, Country',\n 'Validated only Country',\n]\nCATEGORIES = {\n 'primary phone to name': to_name,\n 'secondary phone to name': to_name,\n 'primary address to name': to_name,\n 'secondary address to name': to_name,\n 'email to name': to_name,\n 'primary phone to address': to_address,\n 'secondary phone to address': to_address,\n 'primary phone line type': line_type,\n 'secondary phone line type': line_type,\n 'primary address type': address_type,\n 'secondary address type': address_type,\n 'primary address diagnostic': address_diagnostic,\n 'secondary address diagnostic': address_diagnostic,\n 'ip connection type': [\n 'Cable/DSL',\n 'Corporate',\n 'Cellular',\n 'Dialup',\n ],\n}" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aksingh-fb/glow
[ "c50603a1885c9bffd935fbd1c7c10766b062cef9" ]
[ "torch_glow/tests/nodes/floor_test.py" ]
[ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleFloorModule(torch.nn.Module):\n def forward(self, a, b):\n c = a + b\n return torch.floor(c)\n\n\nclass TestFloor(unittest.TestCase):\n def test_floor(self):\n \"\"\"Basic test of the PyTorch floor Node on Glow.\"\"\"\n\n x = torch.randn(3, 4, 5)\n y = torch.randn(3, 4, 5)\n utils.compare_tracing_methods(\n SimpleFloorModule(), x, y, fusible_ops={\"aten::floor\"}\n )\n" ]
[ [ "torch.randn", "torch.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rapirent/DSAI-HW3
[ "ee83990f511049b8d53be5765040ab2068af6c3f" ]
[ "addition-subtractor.py" ]
[ "\n# coding: utf-8\nfrom keras.models import Sequential\nfrom keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom six.moves import range\n\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_size', default='45000')\nparser.add_argument('--train_size', default='40000')\nparser.add_argument('--digits', default='3')\nparser.add_argument('--epoch', default='2')\nparser.add_argument('--activation', default='softmax')\nparser.add_argument('--output_name', default='model_1')\nargs = parser.parse_args()\n\n# # Parameters Config\nclass colors:\n ok = '\\033[92m'\n fail = '\\033[91m'\n close = '\\033[0m'\n\nDATA_SIZE = int(args.data_size)\nTRAIN_SIZE = int(args.train_size)\nDIGITS = int(args.digits)\nREVERSE = False\nMAXLEN = DIGITS + 1 + DIGITS\nchars = '0123456789+- '\nRNN = layers.LSTM\nHIDDEN_SIZE = 128\nBATCH_SIZE = 128\nEPOCH_SIZE = int(args.epoch)\nLAYERS = 1\nACTIVATION = args.activation\n\noutput_file = open('./data/as-' + args.output_name, 'w')\nprint('DATA_SIZE = ', DATA_SIZE , file=output_file)\nprint('TRAIN_SIZE = ', TRAIN_SIZE, file=output_file)\nprint('DIGITS = ', DIGITS, file=output_file)\nprint('EPOCH_SIZE = ', EPOCH_SIZE, file=output_file)\nprint('ACTIVATION = ', ACTIVATION, file=output_file)\n\nclass CharacterTable(object):\n def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))\n\n def encode(self, C, num_rows):\n x = np.zeros((num_rows, len(self.chars)))\n for i, c in enumerate(C):\n x[i, self.char_indices[c]] = 1\n return x\n\n def decode(self, x, calc_argmax=True):\n if calc_argmax:\n x = x.argmax(axis=-1)\n return \"\".join(self.indices_char[i] for i in x)\n\nctable = CharacterTable(chars)\n\nctable.indices_char\n\n\n# # Data Generation\nquestions = []\nexpected = []\nseen = set()\nprint('Generating data...')\n\nwhile len(questions) < DATA_SIZE:\n f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1))))\n a, b = f(), f()\n if len(questions) % 2 == 0:\n q = '{}-{}'.format(a, b)\n query = q + ' ' * (MAXLEN - len(q))\n ans = str(a - b)\n else:\n q = '{}+{}'.format(a, b)\n query = q + ' ' * (MAXLEN - len(q))\n ans = str(a + b)\n if q in seen:\n continue\n seen.add(q)\n ans += ' ' * (DIGITS + 1 - len(ans))\n if REVERSE:\n query = query[::-1]\n questions.append(query)\n expected.append(ans)\nprint('Total addition questions:', len(questions))\n\nprint(questions[:5], expected[:5])\n\n\n# # Processing\nprint('Vectorization... 
(to the one-hot encoding)')\nx = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)\ny = np.zeros((len(expected), DIGITS + 1, len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(questions):\n x[i] = ctable.encode(sentence, MAXLEN)\nfor i, sentence in enumerate(expected):\n y[i] = ctable.encode(sentence, DIGITS + 1)\n\nindices = np.arange(len(y))\nnp.random.shuffle(indices)\nprint(indices)\nx = x[indices]\ny = y[indices]\n\n# train_test_split\ntrain_x = x[:TRAIN_SIZE]\ntrain_y = y[:TRAIN_SIZE]\ntest_x = x[TRAIN_SIZE:]\ntest_y = y[TRAIN_SIZE:]\n\nprint('Training Data:')\nprint(train_x.shape)\nprint(train_y.shape)\n\nsplit_at = len(train_x) - len(train_x) // 10\nprint('split_at', split_at)\n(x_train, x_val) = train_x[:split_at], train_x[split_at:]\n(y_train, y_val) = train_y[:split_at], train_y[split_at:]\n\nprint('Training Data:')\nprint(x_train.shape)\nprint(y_train.shape)\n\nprint('Validation Data:')\nprint(x_val.shape)\nprint(y_val.shape)\n\nprint('Testing Data:')\nprint(test_x.shape)\nprint(test_y.shape)\n\nprint(\"input: \", x_train[:3], '\\n\\n', \"label: \", y_train[:3])\n\n\n# # Build Model\nprint('Build model...')\nmodel = Sequential()\nmodel.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))\nmodel.add(layers.RepeatVector(DIGITS + 1))\nfor _ in range(LAYERS):\n model.add(RNN(HIDDEN_SIZE, return_sequences=True))\n\nmodel.add(layers.TimeDistributed(layers.Dense(len(chars))))\nmodel.add(layers.Activation(ACTIVATION))\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nmodel.summary()\n\nprint('train set = ', x_train.shape, 'validation set = ', x_val.shape, file=output_file)\nacc = []\nval_acc = []\nloss = []\nval_loss = []\n# # Training\nfor loop in range(100):\n print()\n print('-' * 50)\n print('Train Loop Num:', loop)\n history = model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCH_SIZE,\n validation_data=(x_val, y_val),\n shuffle=True)\n acc += history.history['acc']\n val_acc += history.history['val_acc']\n loss += history.history['loss']\n val_loss += history.history['val_loss']\n print('loop ', loop, file=output_file)\n print('acc = {} '.format(history.history['acc']), end='', file=output_file)\n print('val_acc = {} '.format(history.history['val_acc']), end='', file=output_file)\n print('loss = {} '.format(history.history['loss']), end='', file=output_file)\n print('val_loss = {} '.format(history.history['val_loss']), file=output_file)\n print('-' * 50 , file=output_file)\n\n for i in range(10):\n ind = np.random.randint(0, len(x_val))\n rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]\n preds = model.predict_classes(rowx, verbose=0)\n q = ctable.decode(rowx[0])\n correct = ctable.decode(rowy[0])\n guess = ctable.decode(preds[0], calc_argmax=False)\n print('Q', q[::-1] if REVERSE else q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print(colors.ok + '☑' + colors.close, end=' ')\n else:\n print(colors.fail + '☒' + colors.close, end=' ')\n print(guess)\n\n\n# # Testing\nprint(\"MSG : Prediction\")\nprint(\"-\" * 50)\nright = 0\npreds = model.predict_classes(test_x, verbose=0)\nfor i in range(len(preds)):\n q = ctable.decode(test_x[i])\n correct = ctable.decode(test_y[i])\n guess = ctable.decode(preds[i], calc_argmax=False)\n print('Q', q[::-1] if REVERSE else q, end=' ')\n print('T', correct, end=' ')\n if correct == guess:\n print(colors.ok + '☑' + colors.close, end=' ')\n right += 1\n else:\n print(colors.fail + '☒' + colors.close, end=' ')\n print(guess)\nprint(\"MSG : 
Accuracy is {}\".format(right / len(preds)))\nprint(\"MSG : Accuracy is {}\".format(right / len(preds)), file=output_file)\nmodel.save('./models/as-' + args.output_name + '.h5')\nwith open('./corpus/as-' + args.output_name + '-training-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(x_train, y_train):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nwith open('./corpus/as-' + args.output_name + '-validation-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(x_val, y_val):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nwith open('./corpus/as-' + args.output_name + '-testing-corpus.csv', 'w') as corpus:\n print('questions,expected', file=corpus)\n for (x, y) in zip(test_x, test_y):\n print('{},{}'.format(ctable.decode(x), ctable.decode(y)), file=corpus)\nplt.plot(acc)\nplt.plot(val_acc)\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('./fig/as-accuracy-' + args.output_name + '.png')\nplt.clf()\n# summarize history for loss\nplt.plot(loss)\nplt.plot(val_loss)\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.savefig('./fig/as-loss-' + args.output_name + '.png')\noutput_file.close()\nplt.clf()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.random.shuffle", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "numpy.random.randint", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yyong119/EE208-Teamproject
[ "4cfecbf83981d89a98e811fcc7eefa9134036c43" ]
[ "train/pytorch-train/crnn_main.py" ]
[ "# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport random\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim as optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nfrom warpctc_pytorch import CTCLoss\r\nimport os\r\nimport utils\r\nimport dataset\r\nfrom keys import alphabet\r\n#Alphabet = [e.encode('utf-8') for e in alphabet]\r\nimport models.crnn as crnn\r\n#with open('../run/char.txt') as f:\r\n# newChars = f.read().strip().decode('utf-8')\r\n#alphabet += u''.join(list(set(newChars) - set(alphabet)))\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--trainroot', help='path to dataset',default='../data/lmdb/train')\r\nparser.add_argument('--valroot', help='path to dataset',default='../data/lmdb/val')\r\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=4)\r\nparser.add_argument('--batchSize', type=int, default=128, help='input batch size')\r\nparser.add_argument('--imgH', type=int, default=32, help='the height of the input image to network')\r\nparser.add_argument('--imgW', type=int, default=256, help='the width of the input image to network')\r\nparser.add_argument('--nh', type=int, default=256, help='size of the lstm hidden state')\r\nparser.add_argument('--niter', type=int, default=1000000, help='number of epochs to train for')\r\nparser.add_argument('--lr', type=float, default=0.005, help='learning rate for Critic, default=0.00005')\r\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\r\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\r\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\r\nparser.add_argument('--crnn', help=\"path to crnn (to continue training)\",default='../pretrain-models/netCRNN.pth')\r\n#parser.add_argument('--crnn', help=\"path to crnn (to continue training)\",default='')\r\nparser.add_argument('--alphabet', default=alphabet)\r\nparser.add_argument('--experiment', help='Where to store samples and models',default='./save_model')\r\nparser.add_argument('--displayInterval', type=int, default=50, help='Interval to be displayed')\r\nparser.add_argument('--n_test_disp', type=int, default=1000, help='Number of samples to display when test')\r\nparser.add_argument('--valInterval', type=int, default=50, help='Interval to be displayed')\r\nparser.add_argument('--saveInterval', type=int, default=1000, help='Interval to be displayed')\r\nparser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')\r\nparser.add_argument('--adadelta', action='store_true', help='Whether to use adadelta (default is rmsprop)')\r\nparser.add_argument('--keep_ratio', action='store_true', help='whether to keep ratio for image resize')\r\nparser.add_argument('--random_sample', action='store_true', help='whether to sample the dataset with random sampler')\r\nopt = parser.parse_args()\r\nprint(opt)\r\nifUnicode=True\r\nif opt.experiment is None:\r\n opt.experiment = 'expr'\r\nos.system('mkdir {0}'.format(opt.experiment))\r\n\r\nopt.manualSeed = random.randint(1, 10000) # fix seed\r\nprint(\"Random Seed: \", opt.manualSeed)\r\nrandom.seed(opt.manualSeed)\r\nnp.random.seed(opt.manualSeed)\r\ntorch.manual_seed(opt.manualSeed)\r\n\r\ncudnn.benchmark = True\r\n\r\nif torch.cuda.is_available() and not opt.cuda:\r\n print(\"WARNING: You have a CUDA device, so you should probably run with 
--cuda\")\r\n\r\ntrain_dataset = dataset.lmdbDataset(root=opt.trainroot)\r\nassert train_dataset\r\nif not opt.random_sample:\r\n sampler = dataset.randomSequentialSampler(train_dataset, opt.batchSize)\r\nelse:\r\n sampler = None\r\ntrain_loader = torch.utils.data.DataLoader(\r\n train_dataset, batch_size=opt.batchSize,\r\n shuffle=True, sampler=sampler,\r\n num_workers=int(opt.workers),\r\n collate_fn=dataset.alignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio=opt.keep_ratio))\r\ntest_dataset = dataset.lmdbDataset(\r\n root=opt.valroot, transform=dataset.resizeNormalize((256, 32)))\r\n\r\nngpu = int(opt.ngpu)\r\nnh = int(opt.nh)\r\nalphabet = opt.alphabet\r\nnclass = len(alphabet) + 1\r\nnc = 1\r\n\r\nconverter = utils.strLabelConverter(alphabet)\r\ncriterion = CTCLoss()\r\n\r\n\r\n# custom weights initialization called on crnn\r\ndef weights_init(m):\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n m.weight.data.normal_(0.0, 0.02)\r\n elif classname.find('BatchNorm') != -1:\r\n m.weight.data.normal_(1.0, 0.02)\r\n m.bias.data.fill_(0)\r\n\r\ncrnn = crnn.CRNN(opt.imgH, nc, nclass, nh, ngpu)\r\ncrnn.apply(weights_init)\r\nif opt.crnn != '':\r\n print('loading pretrained model from %s' % opt.crnn)\r\n crnn.load_state_dict(torch.load(opt.crnn))\r\nprint(crnn)\r\n\r\nimage = torch.FloatTensor(opt.batchSize, 3, opt.imgH, opt.imgH)\r\ntext = torch.IntTensor(opt.batchSize * 5)\r\nlength = torch.IntTensor(opt.batchSize)\r\n\r\nif opt.cuda:\r\n crnn.cuda()\r\n image = image.cuda()\r\n criterion = criterion.cuda()\r\n\r\nimage = Variable(image)\r\ntext = Variable(text)\r\nlength = Variable(length)\r\n\r\n# loss averager\r\nloss_avg = utils.averager()\r\n\r\n# setup optimizer\r\nif opt.adam:\r\n optimizer = optim.Adam(crnn.parameters(), lr=opt.lr,\r\n betas=(opt.beta1, 0.999))\r\nelif opt.adadelta:\r\n optimizer = optim.Adadelta(crnn.parameters(), lr=opt.lr)\r\nelse:\r\n optimizer = optim.RMSprop(crnn.parameters(), lr=opt.lr)\r\n\r\n\r\ndef val(net, dataset, criterion, max_iter=2):\r\n print('Start val')\r\n\r\n for p in crnn.parameters():\r\n p.requires_grad = False\r\n\r\n net.eval()\r\n data_loader = torch.utils.data.DataLoader(\r\n dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))\r\n val_iter = iter(data_loader)\r\n\r\n i = 0\r\n n_correct = 0\r\n loss_avg = utils.averager()\r\n\r\n max_iter = min(max_iter, len(data_loader))\r\n for i in range(max_iter):\r\n data = val_iter.next()\r\n i += 1\r\n cpu_images, cpu_texts = data\r\n batch_size = cpu_images.size(0)\r\n utils.loadData(image, cpu_images)\r\n if ifUnicode:\r\n cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]\r\n t, l = converter.encode(cpu_texts)\r\n utils.loadData(text, t)\r\n utils.loadData(length, l)\r\n\r\n preds = crnn(image)\r\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\r\n cost = criterion(preds, text, preds_size, length) / batch_size\r\n loss_avg.add(cost)\r\n\r\n _, preds = preds.max(2)\r\n preds = preds.squeeze(2)\r\n preds = preds.transpose(1, 0).contiguous().view(-1)\r\n sim_preds = converter.decode(preds.data, preds_size.data, raw=False)\r\n for pred, target in zip(sim_preds, cpu_texts):\r\n if pred.strip() == target.strip():\r\n n_correct += 1\r\n\r\n raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:opt.n_test_disp]\r\n #for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts):\r\n #print((pred, gt))\r\n #print \r\n accuracy = n_correct / float(max_iter * opt.batchSize)\r\n testLoss = 
loss_avg.val()\r\n #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))\r\n return testLoss,accuracy\r\n\r\ndef clean_txt(txt):\r\n \"\"\"\r\n filter char where not in alphabet with ' '\r\n \"\"\"\r\n newTxt = u''\r\n for t in txt:\r\n if t in alphabet:\r\n newTxt+=t\r\n else:\r\n newTxt+=u' '\r\n return newTxt\r\n \r\ndef trainBatch(net, criterion, optimizer,flage=False):\r\n data = train_iter.next()\r\n cpu_images, cpu_texts = data##decode utf-8 to unicode\r\n if ifUnicode:\r\n cpu_texts = [ clean_txt(tx.decode('utf-8')) for tx in cpu_texts]\r\n \r\n batch_size = cpu_images.size(0)\r\n utils.loadData(image, cpu_images)\r\n t, l = converter.encode(cpu_texts)\r\n utils.loadData(text, t)\r\n utils.loadData(length, l)\r\n\r\n preds = crnn(image)\r\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\r\n cost = criterion(preds, text, preds_size, length) / batch_size\r\n crnn.zero_grad()\r\n cost.backward()\r\n if flage:\r\n lr = 0.0001\r\n optimizer = optim.Adadelta(crnn.parameters(), lr=lr)\r\n optimizer.step()\r\n return cost\r\n\r\nnum =0\r\nlasttestLoss = 10000\r\ntestLoss = 10000\r\nimport os\r\n\r\ndef delete(path):\r\n \"\"\"\r\n 删除文件\r\n \"\"\"\r\n import os\r\n import glob\r\n paths = glob.glob(path+'/*.pth')\r\n for p in paths:\r\n os.remove(p)\r\n \r\n \r\n \r\n \r\nnumLoss = 0##判断训练参数是否下降 \r\n \r\nfor epoch in range(opt.niter):\r\n train_iter = iter(train_loader)\r\n i = 0\r\n while i < len(train_loader):\r\n #print('The step{} ........\\n'.format(i))\r\n for p in crnn.parameters():\r\n p.requires_grad = True\r\n crnn.train()\r\n #if numLoss>50:\r\n # cost = trainBatch(crnn, criterion, optimizer,True)\r\n # numLoss = 0\r\n #else:\r\n cost = trainBatch(crnn, criterion, optimizer)\r\n loss_avg.add(cost)\r\n i += 1\r\n\r\n #if i % opt.displayInterval == 0:\r\n # print('[%d/%d][%d/%d] Loss: %f' %\r\n # (epoch, opt.niter, i, len(train_loader), loss_avg.val()))\r\n # loss_avg.reset()\r\n\r\n if i % opt.valInterval == 0:\r\n testLoss,accuracy = val(crnn, test_dataset, criterion)\r\n #print('Test loss: %f, accuray: %f' % (testLoss, accuracy))\r\n print(\"epoch:{},step:{},Test loss:{},accuracy:{},train loss:{}\".format(epoch,num,testLoss,accuracy,loss_avg.val()))\r\n loss_avg.reset()\r\n # do checkpointing\r\n num +=1\r\n #lasttestLoss = min(lasttestLoss,testLoss)\r\n \r\n if lasttestLoss >testLoss:\r\n print(\"The step {},last lost:{}, current: {},save model!\".format(num,lasttestLoss,testLoss))\r\n lasttestLoss = testLoss\r\n #delete(opt.experiment)##删除历史模型\r\n torch.save(crnn.state_dict(), '{}/netCRNN.pth'.format(opt.experiment))\r\n numLoss = 0\r\n else:\r\n numLoss+=1\r\n \r\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.FloatTensor", "torch.cuda.is_available", "torch.IntTensor", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cebarbosa/summer_project_hydra
[ "386a01253d92075ff00396229e83caf44eed07a3" ]
[ "source_extraction.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on 28/10/2017\n\n@author: Carlos Eduardo Barbosa\n\nDetection of sources in data and separation of bins prior to Voronoi\ntesselation\n\n\"\"\"\nfrom __future__ import division, print_function\nimport os\n\nimport pyregion\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.convolution import Gaussian2DKernel, convolve\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom astropy.stats import SigmaClip\nfrom photutils.background import Background2D, MedianBackground\n\nimport sewpy\n\nimport context\nfrom misc import array_from_header\n\ndef background_removed_data(imgname, redo=False, output=None, hdunum=1):\n \"\"\" Remove background from the image \"\"\"\n data = fits.getdata(imgname, ext=1)\n output = \"detection.fits\"\n if os.path.exists(output) and not redo:\n return output\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = MedianBackground()\n bkg = Background2D(data, (8, 8), filter_size=(5, 5),\n sigma_clip=sigma_clip, bkg_estimator = bkg_estimator)\n outdata = data - bkg.background\n fits.writeto(output, outdata, overwrite=True)\n return output\n\ndef mask_from_regions(imgname, redo=False):\n \"\"\" Mask regions marked in file mask.reg made in ds9. \"\"\"\n data = fits.getdata(imgname)\n filename = \"mask.reg\"\n outfile = \"detection_masked.fits\"\n if os.path.exists(outfile) and not redo:\n mask = fits.getdata(outfile)\n return mask\n r = pyregion.open(filename)\n for i, region in enumerate(r.get_filter()):\n mask = region.mask(data.shape)\n data[mask] = np.nan\n hdu = fits.PrimaryHDU(data)\n hdu.writeto(outfile, overwrite=True)\n return outfile\n\ndef run_sextractor(img, redo=False, outfile=None):\n \"\"\" Produces a catalogue of sources in a given field. \"\"\"\n if outfile is None:\n outfile = \"source-catalog.fits\"\n if os.path.exists(outfile) and not redo:\n return outfile\n params = [\"NUMBER\", \"X_IMAGE\", \"Y_IMAGE\", \"KRON_RADIUS\", \"ELLIPTICITY\",\n \"THETA_IMAGE\", \"A_IMAGE\", \"B_IMAGE\", \"MAG_AUTO\", \"FLUX_RADIUS\"]\n config = {\"CHECKIMAGE_TYPE\": \"BACKGROUND\",\n \"CHECKIMAGE_NAME\": \"background.fits\",\n \"DETECT_THRESH\" : 1.5}\n sew = sewpy.SEW(config=config, sexpath=\"source-extractor\", params=params)\n cat = sew(img)\n cat[\"table\"].write(outfile, format=\"fits\", overwrite=True)\n return outfile\n\ndef mask_sources(img, cat, ignore=None, redo=False, output=None):\n \"\"\" Produces segmentation image with bins for detected sources using\n elliptical regions. \"\"\"\n if output is None:\n output = \"sources_mask.fits\"\n if os.path.exists(output) and not redo:\n return output\n data = fits.getdata(img)\n ydim, xdim = data.shape\n xx, yy = np.meshgrid(np.arange(1, xdim + 1), np.arange(1, ydim + 1))\n table = Table.read(cat, 1)\n if ignore is not None:\n idx = np.array([i for i,x in enumerate(table[\"NUMBER\"]) if x not in\n ignore])\n table = table[idx]\n axratio = table[\"B_IMAGE\"] / table[\"A_IMAGE\"]\n # table = table[axratio > 0.4]\n mask = np.zeros_like(data)\n for source in table:\n R = calc_isophotes(xx, yy, source[\"X_IMAGE\"], source[\"Y_IMAGE\"], \\\n source[\"THETA_IMAGE\"] - 90, source[\"B_IMAGE\"] /\n source[\"A_IMAGE\"])\n Rmax = 1.5 * source[\"KRON_RADIUS\"]\n mask += np.where(R <= Rmax, 1, 0)\n hdu = fits.PrimaryHDU(mask)\n hdu.writeto(output, overwrite=True)\n return output\n\ndef calc_isophotes(x, y, x0, y0, PA, q):\n \"\"\" Calculate isophotes for a given component. 
\"\"\"\n x = np.copy(x) - x0\n y = np.copy(y) - y0\n shape = x.shape\n theta = np.radians(PA)\n c, s = np.cos(theta), np.sin(theta)\n rot = np.array([[s, c], [-c, s]])\n xy = np.dot(np.column_stack((x.flatten(), y.flatten())), rot).T\n x = np.reshape(xy[0], newshape=shape)\n y = np.reshape(xy[1], newshape=shape)\n return np.sqrt(np.power(x, 2) + np.power(y / q, 2))\n\ndef run_ngc3311(redo=False):\n data_dir = os.path.join(context.home_dir, \"data\")\n fields = context.fields\n for field in fields:\n os.chdir(os.path.join(data_dir, field))\n if field == \"fieldA\":\n imgname = \"ellipse_model.fits\"\n else:\n imgname = f\"sn_field{field[-1]}.fits\"\n detimg = background_removed_data(imgname, redo=redo)\n immasked = mask_from_regions(detimg, redo=redo)\n sexcat = run_sextractor(immasked, redo=redo)\n mask_sources(immasked, sexcat, redo=redo)\n\nif __name__ == \"__main__\":\n run_ngc3311(redo=True)\n" ]
[ [ "numpy.radians", "numpy.power", "numpy.reshape", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.copy", "numpy.zeros_like", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
google/init2winit
[ "62ec9fd31bd7b38bb7c220f15d4187bf0706506d", "62ec9fd31bd7b38bb7c220f15d4187bf0706506d" ]
[ "init2winit/mt_eval/main.py", "init2winit/test_hyperparameters.py" ]
[ "# coding=utf-8\n# Copyright 2021 The init2winit Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Used to evaluate MT model (BLEU/cross_entropy_loss/log_perplexity).\n\n\"\"\"\n\nimport json\nimport os\nimport sys\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom init2winit import hyperparameters\nfrom init2winit.dataset_lib import datasets\nfrom init2winit.model_lib import models\nfrom init2winit.mt_eval import bleu_evaluator\nimport jax\nimport tensorflow.compat.v2 as tf\n\n\n\n\n# Enable flax xprof trace labelling.\nos.environ['FLAX_PROFILE'] = 'true'\n\nflags.DEFINE_string('checkpoint_dir', '', 'Path to the checkpoint to evaluate.')\nflags.DEFINE_integer('seed', 0, 'seed used to initialize the computation.')\nflags.DEFINE_integer('worker_id', 1,\n 'Client id for hparam sweeps and tuning studies.')\nflags.DEFINE_string('experiment_config_filename', None,\n 'Path to the config.json file for this experiment.')\nflags.DEFINE_string(\n 'model', '', 'Name of the model used to evaluate (not'\n 'needed if experiment_config_filenmae is provided).')\nflags.DEFINE_string(\n 'dataset', '', 'Name of the dataset used to evaluate (not'\n 'needed if experiment_config_filenmae is provided).')\nflags.DEFINE_string(\n 'hparam_overrides', '', 'json representation of a flattened dict of hparam '\n 'overrides. 
For nested dictionaries, the override key '\n 'should be specified as lr_hparams.initial_value.')\nflags.DEFINE_string(\n 'trial_hparams_filename', None,\n 'Path to the hparams.json file for the trial we want to run inference on.')\nflags.DEFINE_string('mt_eval_config', '',\n 'Json representation of the mt evaluation config.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(unused_argv):\n # Necessary to use the tfds loader.\n tf.enable_v2_behavior()\n\n if jax.process_count() > 1:\n # TODO(ankugarg): Add support for multihost inference.\n raise NotImplementedError('BLEU eval does not support multihost inference.')\n\n rng = jax.random.PRNGKey(FLAGS.seed)\n\n mt_eval_config = json.loads(FLAGS.mt_eval_config)\n\n if FLAGS.experiment_config_filename:\n with tf.io.gfile.GFile(FLAGS.experiment_config_filename) as f:\n experiment_config = json.load(f)\n if jax.process_index() == 0:\n logging.info('experiment_config: %r', experiment_config)\n dataset_name = experiment_config['dataset']\n model_name = experiment_config['model']\n else:\n assert FLAGS.dataset and FLAGS.model\n dataset_name = FLAGS.dataset\n model_name = FLAGS.model\n\n if jax.process_index() == 0:\n logging.info('argv:\\n%s', ' '.join(sys.argv))\n logging.info('device_count: %d', jax.device_count())\n logging.info('num_hosts : %d', jax.host_count())\n logging.info('host_id : %d', jax.host_id())\n\n model_class = models.get_model(model_name)\n dataset_builder = datasets.get_dataset(dataset_name)\n dataset_meta_data = datasets.get_dataset_meta_data(dataset_name)\n\n hparam_overrides = None\n if FLAGS.hparam_overrides:\n if isinstance(FLAGS.hparam_overrides, str):\n hparam_overrides = json.loads(FLAGS.hparam_overrides)\n\n merged_hps = hyperparameters.build_hparams(\n model_name=model_name,\n initializer_name=experiment_config['initializer'],\n dataset_name=dataset_name,\n hparam_file=FLAGS.trial_hparams_filename,\n hparam_overrides=hparam_overrides)\n\n if jax.process_index() == 0:\n logging.info('Merged hps are: %s', json.dumps(merged_hps.to_json()))\n\n evaluator = bleu_evaluator.BLEUEvaluator(FLAGS.checkpoint_dir, merged_hps,\n rng,\n model_class, dataset_builder,\n dataset_meta_data,\n mt_eval_config)\n evaluator.translate_and_calculate_bleu()\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# coding=utf-8\n# Copyright 2021 The init2winit Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for hyperparameters.py.\"\"\"\n\nfrom absl.testing import absltest\nfrom init2winit import hyperparameters\nimport tensorflow.compat.v1 as tf\n\n\nclass HyperParameterTest(absltest.TestCase):\n \"\"\"Tests hyperparameter overrides.\"\"\"\n\n def test_override(self):\n \"\"\"Test polynomial schedule works correctly.\"\"\"\n hps_overrides = {\n 'lr_hparams.schedule': 'polynomial',\n 'lr_hparams.power': 2.0,\n 'lr_hparams.base_lr': .1,\n 'lr_hparams.end_factor': .01,\n 'lr_hparams.decay_steps_factor': 0.5,\n }\n\n merged_hps = hyperparameters.build_hparams(\n model_name='transformer',\n initializer_name='noop',\n 
dataset_name='lm1b',\n hparam_file=None,\n hparam_overrides=hps_overrides)\n\n self.assertEqual(merged_hps.lr_hparams['schedule'],\n 'polynomial')\n self.assertEqual(\n set(merged_hps.lr_hparams.keys()),\n set([\n 'schedule', 'power', 'base_lr',\n 'end_factor', 'decay_steps_factor'\n ]))\n\nif __name__ == '__main__':\n tf.enable_v2_behavior()\n absltest.main()\n" ]
[ [ "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.enable_v2_behavior" ], [ "tensorflow.compat.v1.enable_v2_behavior" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jacks0nJ/Importance-Sampling-Code
[ "f41fac451e9f78ab2130600ca83e1a6406ee43a5", "f41fac451e9f78ab2130600ca83e1a6406ee43a5" ]
[ "pyfpt/numerics/is_simulation.py", "pyfpt/analytics/gaussian_deviation.py" ]
[ "'''\nImportance Sampling Simulation\n------------------------------\nThis is the main module of the PyFPT code, as it runs the simulations, post\nprocesses and exports the data ready for plotting.\n'''\n\nfrom timeit import default_timer as timer\nimport multiprocessing as mp\nfrom multiprocessing import Process, Queue\n\nimport numpy as np\n\nfrom .multi_processing_error import multi_processing_error\nfrom .histogram_data_truncation import histogram_data_truncation\nfrom .save_data_to_file import save_data_to_file\nfrom .data_points_pdf import data_points_pdf\n\nfrom .importance_sampling_cython import\\\n importance_sampling_simulations\n\n\ndef is_simulation(drift, diffusion, x_in, x_end,\n num_runs, bias, time_step, bins=50, min_bin_size=400,\n num_sub_samples=20, estimator='lognormal',\n save_data=False, t_in=0., t_f=100, x_r=None):\n \"\"\"Executes the simulation runs, then returns the histogram bin centres,\n heights and errors.\n\n Parameters\n ----------\n drift : function\n The drift term of the simulated Langevin equation. Must take both x and\n t as arguments in the format ``(x, t)``.\n diffusion : function\n The diffusion term of the simulated Langevin equation. Must take both\n x and t as arguments in the format ``(x, t)``.\n x_in : float\n The initial position value.\n x_end : float\n The end position value, i.e. the threshold which defines the FPT\n problem.\n num_runs : int\n The number of simulation runs.\n bias : scalar or function\n The bias used in the simulated Langevin equation to achieve importance\n sampling\n\n If a scalar (float or int), this the bias amplitude, i.e. a coefficent\n which mutiplies the the diffusion to define the bias.\n\n If a function, this simply defines the bias used. Must take arguments\n for both position and time in the format ``(x, t)``.\n bins : int or sequence, optional\n If bins is an integer, it defines the number equal width bins for the\n first-passage times. If bins is a list or numpy array, it defines the\n bin edges, including the left edge of the first bin and the right edge\n of the last bin. The widths can vary. Defaults to 50 evenly spaced\n bins.\n time_step : float or int, optional\n The time step. This should be at least smaller than the standard\n deviation of the FPTs.\n min_bin_size : int, optional\n The minimum number of runs per bin to included in the data analysis.\n If a bin has less than this number, it is truncated. Defaults to 400.\n estimator : string, optional\n The estimator used to reconstruct the target distribution probability\n density from the importance sample. If ``'lognormal'``, it assumes the\n weights in each bin follow a lognomral distribution. If ``'naive'``, no\n assumption is made but more runs are required for convergance.\n num_sub_samples : int, optional\n The number of subsamples used in jackknife estimation of the errors\n used for the ``'naive'`` estimator. Defaults to 20 when ``estimator``\n is ``'naive'``.\n Save_data : bool, optional\n If ``True``, the first-passage times and the associated weights for\n each run is saved to a file.\n t_in : float, optional\n The initial time value of simulation Defaults to 0.\n t_f : float, optional\n The maxiumum FPT allowed per run. If this is exceded, the\n simulation run ends and returns ``t_f``, which can then be\n truncated. Defaults to 100.\n x_r : float, optional\n The value of the reflective boundary. Must be compatible with the x_in\n and x_end chosen. 
Defaults to unreachable value, effectively no\n boundary.\n Returns\n -------\n bin_centres : list\n The centres of the histogram bins.\n heights : list\n The heights of the normalised histogram bars.\n errors : list\n The errors in estimating the heights.\n \"\"\"\n # Checking drift and diffusion are of the correct format\n if callable(drift) is True:\n if isinstance(drift(x_in, t_in), float) is True:\n pass\n else:\n ValueError('Provided drift is not the format (x, t)')\n else:\n ValueError('Provided drift is not a function')\n if callable(diffusion) is True:\n if isinstance(diffusion(x_in, t_in), float) is True:\n pass\n else:\n ValueError('Provided diffusion is not the format (x, t)')\n else:\n ValueError('Provided diffusion is not a function')\n\n # Make sure provided values are floats for Cython\n if isinstance(x_in, int) is True:\n x_in = 1.0*x_in\n if isinstance(x_end, int) is True:\n x_end = 1.0*x_end\n # Checking bias is of correct form\n if isinstance(bias, float) is True or isinstance(bias, float) is True:\n # If the bias argument is a scalar, use diffusion based bias\n bias_type = 'diffusion'\n if bias == 0:\n estimator = 'naive'\n print('As direct simulation, defaulting to naive estimator')\n elif callable(bias):\n # If a function is provided, check it is of the correct form\n if isinstance(bias(x_in, t_in), float) is True:\n bias_type = 'custom'\n else:\n ValueError('bias function must be of the form bias(x, t)')\n else:\n ValueError('Provided bias is not a number or function')\n\n if isinstance(time_step, float) is not True\\\n and isinstance(time_step, int) is not True:\n raise ValueError('time_step is not a number')\n\n # Check the user has provided a estimator\n if estimator != 'lognormal' and estimator != 'naive':\n print('Invalid estimator argument, defaulting to naive method')\n estimator = 'naive'\n\n # If no x_r argument is provided, default to infinite boundary\n if x_r is None:\n # Set the reflective surface at an arbitrarily large value in the\n # opposite direction to propagation\n x_r = 10000*(x_in-x_end)\n elif isinstance(x_r, float) is False:\n if isinstance(x_r, int) is True:\n if isinstance(x_r, bool) is True:\n raise ValueError('x_r is not a number')\n else:\n pass\n else:\n raise ValueError('x_r is not a number')\n elif (x_r-x_in)*(x_in-x_end) < 0:\n raise ValueError('End and relfective surfaces not compatible with' +\n ' initial value.')\n\n # The number of sims per core, so the total is correct\n num_runs_per_core = int(num_runs/mp.cpu_count())\n # Time how long the simulation runs take\n start = timer()\n\n # Using multiprocessing\n def multi_processing_func(x_in, x_r, x_end, t_in, t_f,\n time_step, bias, num_runs, queue_efolds,\n queue_ws, queue_refs):\n results =\\\n importance_sampling_simulations(x_in, x_r, x_end, t_in,\n t_f, time_step, bias,\n num_runs, drift, diffusion,\n bias_type=bias_type,\n count_refs=False)\n fpt_values = np.array(results[0][:])\n ws = np.array(results[1][:])\n queue_efolds.put(fpt_values)\n queue_ws.put(ws)\n\n queue_efolds = Queue()\n queue_ws = Queue()\n queue_refs = Queue()\n cores = int(mp.cpu_count()/1)\n\n print('Number of cores used: '+str(cores))\n processes = [Process(target=multi_processing_func,\n args=(x_in, x_r, x_end, t_in, t_f,\n time_step, bias, num_runs_per_core,\n queue_efolds, queue_ws, queue_refs))\n for i in range(cores)]\n\n for p in processes:\n p.start()\n\n # More efficient to work with numpy arrays\n fpt_array = np.array([queue_efolds.get() for p in processes])\n ws_array = 
np.array([queue_ws.get() for p in processes])\n\n end = timer()\n print(f'The simulations took: {end - start} seconds')\n\n # Combine into columns into 1\n fpt_values = fpt_array.flatten()\n w_values = ws_array.flatten()\n\n # Sort in order of increasing Ns\n sort_idx = np.argsort(fpt_values)\n fpt_values = fpt_values[sort_idx]\n w_values = w_values[sort_idx]\n\n # Checking if multipprocessing error occured, by looking at correlation\n _ = multi_processing_error(fpt_values, w_values)\n\n # Truncating any data which did not reach x_end\n fpt_values, w_values =\\\n histogram_data_truncation(fpt_values, t_f, weights=w_values,\n num_sub_samples=num_sub_samples)\n # Saving the data\n if save_data is True:\n if bias_type == 'diffusion':\n save_data_to_file(fpt_values, w_values, x_in, num_runs, bias)\n else:\n # Label the file differently if custom bias is used.\n save_data_to_file(fpt_values, w_values, x_in, num_runs,\n bias(x_in, 0), extra_label='_custom_bias')\n\n # Now analysisng the data to creating the histogram/PDF data\n bin_centres, heights, errors, num_runs_used, bin_edges_untruncated =\\\n data_points_pdf(fpt_values, w_values, estimator, bins=bins,\n min_bin_size=min_bin_size,\n num_sub_samples=num_sub_samples)\n # Return data as lists\n return bin_centres.tolist(), heights.tolist(), errors.tolist()\n", "'''\nGaussian Deviation\n------------------\nThis module calculates the point of deviation from Gaussian behaviour for the\nfirst-passage times in the number of e-folds for a provided threshold value\nusing the `Edgeworth series`_ in low diffusion limit\nand the relations for the central moments given in `Vennin--Starobinsky 2015`_.\nThis is calculated by using root finding to find the point at which the higher\norder terms of the Edgeworth series first equal the threshold.\n\n.. _Edgeworth series: https://en.wikipedia.org/wiki/Edgeworth_series\n.. _Vennin--Starobinsky 2015: https://arxiv.org/abs/1506.04732\n'''\n\n\nimport numpy as np\nfrom scipy import optimize\n\nfrom .mean_efolds import mean_efolds\nfrom .variance_efolds import variance_efolds\nfrom .skewness_efolds import skewness_efolds\nfrom .kurtosis_efolds import kurtosis_efolds\n\n\n# Using the Gram–Charlier A series\n# https://en.wikipedia.org/wiki/Edgeworth_series to approximate when we expect\n# classical deviation from a gaussian. This is done by finding x such that the\n# higher order terms of the edgeworth expanion are\n# nu is the amount pf deviation from a Gaussian.\ndef gaussian_deviation(potential, potential_dif, potential_ddif, phi_in,\n phi_end, nu=1., phi_interval=None):\n \"\"\"Returns the skewness of the number of e-folds.\n\n Parameters\n ----------\n potential : function\n The potential.\n potential_dif : function\n The potential's first derivative.\n potential_ddif : function\n The potential's second derivative.\n phi_in : float\n The initial scalar field value.\n nu : float, optional\n The decimal threshold of the deviation from Gaussianity. Defaults to 1\n phi_interval : list, optional.\n The field interval which contains the root. 
Defaults to between 0 and\n 10000 standard deviations from the mean.\n\n Returns\n -------\n deviation_point : float\n The field value at which the deviation occurs.\n\n \"\"\"\n mean =\\\n mean_efolds(potential, potential_dif, potential_ddif, phi_in, phi_end)\n std =\\\n variance_efolds(potential, potential_dif, potential_ddif, phi_in,\n phi_end)**0.5\n skewness =\\\n skewness_efolds(potential, potential_dif, potential_ddif, phi_in,\n phi_end)\n kurtosis =\\\n kurtosis_efolds(potential, potential_dif, potential_ddif, phi_in,\n phi_end)\n\n def higher_order_egdeworth_term(y):\n norm_y = (y-mean)/std\n skew_term = np.divide(skewness*hermite_poly3(norm_y), 6)\n kurtosis_term = np.divide(kurtosis*hermite_poly4(norm_y), 24)\n skew_squared_term =\\\n np.divide(hermite_poly6(norm_y)*skewness**2, 72)\n return (skew_term+kurtosis_term+skew_squared_term)-nu\n\n if phi_interval is None:\n sol = optimize.root_scalar(higher_order_egdeworth_term,\n method='brentq',\n bracket=[mean, mean+10000*std])\n else:\n sol = optimize.root_scalar(higher_order_egdeworth_term,\n method='brentq', bracket=phi_interval)\n # The root is the position of when deviation occurs\n deviation_point = sol.root\n return deviation_point\n\n\n# This is the \"probabilist's Hermite polynomial\", which is different to the\n# \"physicist's Hermite polynomials\" used by SciPy\ndef hermite_poly3(y):\n hermite_poly3 = y**3-3*y\n return hermite_poly3\n\n\n# This is the \"probabilist's Hermite polynomial\", which is different to the\n# \"physicist's Hermite polynomials\" used by SciPy\ndef hermite_poly4(y):\n hermite_poly4 = y**4-6*y+3\n return hermite_poly4\n\n\n# This is the \"probabilist's Hermite polynomial\", which is different to the\n# \"physicist's Hermite polynomials\" used by SciPy\ndef hermite_poly6(y):\n hermite_poly6 = y**6-15*y**4+45*y**2-15\n return hermite_poly6\n" ]
[ [ "numpy.argsort", "numpy.array" ], [ "scipy.optimize.root_scalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
HaeckelK/bookkeeping
[ "6f8b62f1322fe1c409f397222653382d302d9754" ]
[ "ledger.py" ]
[ "from abc import ABC, abstractmethod\nfrom typing import List\n\nimport numpy as np\n\n\nclass Ledger(ABC):\n @abstractmethod\n def get_next_batch_id(self) -> int:\n \"\"\"Return next available batch id.\"\"\"\n\n @abstractmethod\n def get_next_transaction_id(self) -> int:\n \"\"\"Return next available transaction id.\"\"\"\n\n\nclass PandasLedger(Ledger):\n def get_next_batch_id(self) -> int:\n try:\n next_id = int(self.df[\"batch_id\"].max()) + 1\n except ValueError:\n return 0\n return next_id\n\n def append(self, df) -> List[int]:\n next_id = self.get_next_transaction_id()\n ids = np.arange(start=next_id, stop=next_id + df.shape[0])\n df[\"transaction_id\"] = ids\n self.df = self.df.append(df[self.columns], ignore_index=True, sort=False)\n return list(ids)\n\n def get_next_transaction_id(self) -> int:\n try:\n next_id = int(self.df[\"transaction_id\"].max()) + 1\n except ValueError:\n return 0\n return next_id\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lamyiowce/training
[ "498b945dd914573bdbf7a871eaeebd9388b60b76", "da4c959b5a7b65091b850872cdd4014d768c087c", "498b945dd914573bdbf7a871eaeebd9388b60b76" ]
[ "object_detection/pytorch/tools/test_net.py", "speech_recognition/pytorch/eval_model.py", "rnn_speech_recognition/pytorch/common/sampler.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"maskrcnn_benchmark\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n model.to(cfg.MODEL.DEVICE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n 
inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\nif __name__ == \"__main__\":\n main()\n", "import json\n\nimport torch\nfrom torch.autograd import Variable\nfrom warpctc_pytorch import CTCLoss\n\nimport torch.nn.functional as F\n\nimport sys\n### Import Data Utils ###\nsys.path.append('../')\n\nfrom experiments.ml.specaugment.mlcommons.training.speech_recognition.data.bucketing_sampler import BucketingSampler, SpectrogramDatasetWithLength\nfrom experiments.ml.specaugment.mlcommons.training.speech_recognition.data.data_loader import AudioDataLoader, SpectrogramDataset\nfrom decoder import GreedyDecoder\nfrom model import DeepSpeech, supported_rnns\n\ndef eval_model(model, test_loader, decoder, params):\n start_iter = 0 # Reset start iteration for next epoch\n total_cer, total_wer = 0, 0\n model.eval()\n for i, (data) in enumerate(test_loader): # test\n inputs, targets, input_percentages, target_sizes = data\n\n with torch.no_grad():\n inputs = Variable(inputs, volatile=True)\n\n # unflatten targets\n split_targets = []\n offset = 0\n for size in target_sizes:\n split_targets.append(targets[offset:offset + size])\n offset += size\n\n if params.cuda:\n inputs = inputs.cuda()\n\n out = model(inputs)\n out = out.transpose(0, 1) # TxNxH\n seq_length = out.size(0)\n sizes = input_percentages.mul_(int(seq_length)).int()\n\n decoded_output = decoder.decode(out.data, sizes)\n target_strings = decoder.process_strings(decoder.convert_to_strings(split_targets))\n wer, cer = 0, 0\n for x in range(len(target_strings)):\n wer += decoder.wer(decoded_output[x], target_strings[x]) / float(len(target_strings[x].split()))\n cer += decoder.cer(decoded_output[x], target_strings[x]) / float(len(target_strings[x]))\n total_cer += cer\n total_wer += wer\n\n if params.cuda:\n torch.cuda.synchronize()\n del out\n wer = total_wer / len(test_loader.dataset)\n cer = total_cer / len(test_loader.dataset)\n wer *= 100\n cer *= 100\n\n return wer, cer\n", "# Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\n\nfrom torch.utils.data.sampler import Sampler\n\n\nclass DistributedSampler(Sampler):\n def __init__(self, dataset, batch_size, world_size, rank):\n \"\"\"\n Constructor for the DistributedSampler.\n :param dataset: dataset\n :param batch_size: local batch size\n :param world_size: number of distributed workers\n :param rank: rank of the current process\n \"\"\"\n self.dataset = dataset\n self.world_size = world_size\n self.rank = rank\n self.epoch = 0\n\n self.batch_size = batch_size\n self.global_batch_size = batch_size * world_size\n\n self.data_len = len(self.dataset)\n\n self.num_samples = self.data_len // self.global_batch_size \\\n * self.global_batch_size\n\n def distribute_batches(self, indices):\n \"\"\"\n Assigns batches to workers.\n Consecutive ranks are getting consecutive batches.\n :param indices: torch.tensor with batch indices\n \"\"\"\n assert len(indices) == self.num_samples\n\n indices = indices.view(-1, self.batch_size)\n indices = indices[self.rank::self.world_size].contiguous()\n indices = indices.view(-1)\n indices = indices.tolist()\n\n assert len(indices) == self.num_samples // self.world_size\n return indices\n\n def reshuffle_batches(self, indices, rng):\n \"\"\"\n Permutes global batches\n :param indices: torch.tensor with batch indices\n :param rng: instance of torch.Generator\n \"\"\"\n indices = indices.view(-1, self.global_batch_size)\n num_batches = indices.shape[0]\n order = torch.randperm(num_batches, generator=rng)\n indices = indices[order, :]\n indices = indices.view(-1)\n return indices\n\n def __iter__(self):\n g = torch.Generator()\n g.manual_seed(self.epoch)\n # generate permutation\n indices = torch.randperm(self.data_len, generator=rng)\n\n # make indices evenly divisible by (batch_size * world_size)\n indices = indices[:self.num_samples]\n\n # assign batches to workers\n indices = self.distribute_batches(indices)\n return iter(indices)\n\n def set_epoch(self, epoch):\n \"\"\"\n Sets current epoch index.\n Epoch index is used to seed RNG in __iter__() function.\n :param epoch: index of current epoch\n \"\"\"\n self.epoch = epoch\n\n def __len__(self):\n return self.num_samples // self.world_size\n\n\nclass BucketingSampler(DistributedSampler):\n def __init__(self, dataset, batch_size, num_buckets, world_size, rank):\n \"\"\"\n Bucketing sampler with approx. 
equally-sized buckets.\n :param dataset: dataset\n :param batch_size: local batch size\n :param seeds: list of seeds, one seed for each training epoch\n :param num_buckets: number of buckets\n :param world_size: number of distributed workers\n :param rank: rank of the current process\n \"\"\"\n super().__init__(dataset, batch_size, world_size, rank)\n\n self.num_buckets = num_buckets\n len_ids = np.argsort([sample['duration'] for sample in dataset.samples])\n self.buckets = [torch.from_numpy(t)\n for t in np.array_split(len_ids, num_buckets)]\n global_bs = self.global_batch_size\n\n def __iter__(self):\n g = torch.Generator()\n g.manual_seed(self.epoch)\n global_bsz = self.global_batch_size\n\n indices = []\n for bid in range(self.num_buckets):\n # random shuffle within current bucket\n perm = torch.randperm(len(self.buckets[bid]), generator=g)\n bucket_indices = self.buckets[bid][perm]\n\n # add samples from current bucket to indices for current epoch\n indices.append(bucket_indices)\n\n indices = torch.cat(indices)\n\n # make indices evenly divisible by global batch size\n length = len(indices) // global_bsz * global_bsz\n indices = indices[:length]\n\n assert len(indices) % self.global_batch_size == 0\n\n # perform global reshuffle of all global batches\n indices = self.reshuffle_batches(indices, g)\n # distribute batches to individual workers\n indices = self.distribute_batches(indices)\n return iter(indices)\n\n" ]
[ [ "torch.distributed.init_process_group", "torch.cuda.set_device" ], [ "torch.cuda.synchronize", "torch.no_grad", "torch.autograd.Variable" ], [ "torch.Generator", "torch.cat", "torch.randperm", "torch.from_numpy", "numpy.argsort", "numpy.array_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ahmadianlab/tc-gan
[ "06c549e8ae74bc6af62fddeed698565ea1f548c5", "06c549e8ae74bc6af62fddeed698565ea1f548c5", "06c549e8ae74bc6af62fddeed698565ea1f548c5" ]
[ "tc_gan/networks/tests/test_tuning_curve.py", "tc_gan/loaders/tests/test_gen_moments_records.py", "tc_gan/tests/test_ctc_stats_recorder.py" ]
[ "import numpy as np\nimport pytest\n\nfrom ...core import consume_config\nfrom ..cwgan import ConditionalTuningCurveGenerator\nfrom ..ssn import emit_tuning_curve_generator, ssn_type_choices\nfrom ..wgan import DEFAULT_PARAMS\nfrom .test_euler_ssn import JDS\n\nTEST_PARAMS = dict(\n DEFAULT_PARAMS,\n # Stimulator:\n num_tcdom=10,\n num_sites=50,\n # Model / SSN:\n tau_E=2,\n dt=0.1,\n seqlen=240,\n skip_steps=200,\n # Prober:\n probes=[0],\n **JDS # Model / SSN\n)\ndel TEST_PARAMS['bandwidths']\ndel TEST_PARAMS['contrasts']\ndel TEST_PARAMS['sample_sites']\ndel TEST_PARAMS['gen']\ndel TEST_PARAMS['disc']\n\n\ndef emit_tcg_for_test(**kwargs):\n return emit_tuning_curve_generator(**dict(TEST_PARAMS, **kwargs))\n\n\ndef tcg_for_test(config={}, **kwargs):\n tcg, rest = consume_config(emit_tcg_for_test, config, **kwargs)\n assert not rest\n return tcg\n\n\ndef get_param_values(self):\n values = {}\n for p in self.get_all_params():\n values[p.name] = p.get_value()\n return values\n\n\[email protected]('ssn_type, params', [\n ('default', {}),\n # dict(J=0.5), # unsupported (should I?)\n ('default', dict(J=[[1, 2], [3, 4]])),\n ('default', dict(J=np.array([[1, 2], [3, 4]], dtype=int))),\n ('default', dict(J=np.array([[1, 2], [3, 4]], dtype='float32'))),\n ('heteroin', dict(V=[0.3, 0])),\n ('deg-heteroin', dict(V=0.5)),\n])\ndef test_tcg_set_params(ssn_type, params):\n config = dict(ssn_type=ssn_type)\n tcg = tcg_for_test(config)\n keys = set(params)\n tcg.set_params(params)\n assert keys == set(params) # set_params must not modify params\n actual = get_param_values(tcg)\n\n test = {}\n for k in keys:\n test[k] = np.allclose(actual[k], params[k])\n # Manually compare parameters (instead of\n # np.testing.assert_equal) since params[k] may be broadcast to\n # array.\n\n assert all(test.values())\n\n\ndef test_tcg_set_unknown_params():\n tcg = tcg_for_test()\n with pytest.raises(ValueError) as excinfo:\n tcg.set_params(dict(V=[0.3, 0]))\n assert 'Unknown parameters:' in str(excinfo.value)\n\n\nflat_param_names = {\n 'default': [\n 'J_EE', 'J_EI',\n 'J_IE', 'J_II',\n 'D_EE', 'D_EI',\n 'D_IE', 'D_II',\n 'S_EE', 'S_EI',\n 'S_IE', 'S_II',\n ],\n}\nflat_param_names['heteroin'] = ['V_E', 'V_I'] + flat_param_names['default']\nflat_param_names['deg-heteroin'] = ['V'] + flat_param_names['default']\n\n\[email protected]('ssn_type', ssn_type_choices)\[email protected]('conditional', [False, True])\ndef test_tcg_flat_param_names(ssn_type, conditional):\n desired_names = tuple(flat_param_names[ssn_type])\n config = {}\n if conditional:\n config['emit_tcg'] = ConditionalTuningCurveGenerator.consume_kwargs\n tcg = tcg_for_test(config, ssn_type=ssn_type)\n assert tcg.get_flat_param_names() == desired_names\n", "from types import SimpleNamespace\n\nimport numpy as np\nimport pandas\nimport pytest\n\nfrom ...execution import DataStore\nfrom ...recorders import GenMomentsRecorder\nfrom ..datastore_loader import DataStoreLoader1\n\n\[email protected]('num_mom_conds', [1, 2, 12])\ndef test_record_load(num_mom_conds, tmpdir):\n datastore = DataStore(str(tmpdir))\n recorder = GenMomentsRecorder(datastore, num_mom_conds)\n\n num_steps = 10\n mom_shape = (num_steps, 2 * num_mom_conds)\n desired = pandas.DataFrame(\n np.arange(np.prod(mom_shape)).reshape(mom_shape),\n columns=pandas.MultiIndex.from_product([['mean', 'var'],\n range(num_mom_conds)]),\n dtype='double')\n desired['step'] = np.arange(num_steps, dtype='uint32')\n\n for gen_step in range(num_steps):\n update_result = SimpleNamespace(gen_moments=np.asarray([\n 
desired.loc[gen_step, 'mean'],\n desired.loc[gen_step, 'var'],\n ]))\n recorder.record(gen_step, update_result)\n\n loader = DataStoreLoader1(str(tmpdir))\n actual = loader.load('gen_moments')\n pandas.testing.assert_frame_equal(actual, desired)\n", "import numpy as np\n\nfrom ..recorders import ConditionalTuningCurveStatsRecorder\nfrom ..utils import cartesian_product\nfrom ..networks.tests.test_conditional_minibatch import arangemd\n\n\ndef test_ctc_analyze_rect_data():\n contrasts = [5, 20]\n norm_probes = [0, 0.5]\n cell_types = [0, 1]\n conditions = cartesian_product(contrasts, norm_probes, cell_types).T\n num_conditions = len(conditions)\n\n num_bandwidths = 5\n tuning_curves = tc_mean = arangemd((len(conditions), num_bandwidths))\n\n count = 3\n conditions = np.tile(conditions, (count, 1))\n tuning_curves = np.tile(tuning_curves, (count, 1))\n\n indices = np.arange(len(conditions))\n np.random.RandomState(0).shuffle(indices)\n tuning_curves = tuning_curves[indices]\n conditions = conditions[indices]\n\n table = list(ConditionalTuningCurveStatsRecorder.analyze(tuning_curves,\n conditions))\n table = np.array(table)\n\n recorder = ConditionalTuningCurveStatsRecorder(None, num_bandwidths)\n shift_contrast = recorder.column_names.index('contrast')\n ncols = len(recorder.column_names[shift_contrast:])\n assert table.shape == (num_conditions, ncols)\n\n i_count = recorder.column_names.index('count') - shift_contrast\n i_mean_beg = recorder.column_names.index('mean_0') - shift_contrast\n i_mean_end = recorder.column_names.index('var_0') - shift_contrast\n i_var_beg = i_mean_end\n i_var_end = None\n\n np.testing.assert_equal(table[:, i_count], count)\n np.testing.assert_equal(table[:, i_var_beg:i_var_end], 0)\n np.testing.assert_equal(table[:, i_mean_beg:i_mean_end], tc_mean)\n" ]
[ [ "numpy.array", "numpy.allclose" ], [ "numpy.asarray", "numpy.arange", "pandas.testing.assert_frame_equal", "numpy.prod" ], [ "numpy.testing.assert_equal", "numpy.array", "numpy.tile", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
manivaradarajan/tensorboard
[ "6ba7155a614cf1cfab97f8ec7c561adb0a609b0d" ]
[ "tensorboard/plugins/core/core_plugin_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the TensorBoard core endpoints.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections.abc\nimport contextlib\nimport json\nimport os\nimport six\nimport zipfile\n\ntry:\n # python version >= 3.3\n from unittest import mock\nexcept ImportError:\n import mock # pylint: disable=unused-import\n\nimport tensorflow as tf\n\nfrom werkzeug import test as werkzeug_test\nfrom werkzeug import wrappers\n\nfrom tensorboard.backend import application\nfrom tensorboard.backend.event_processing import (\n plugin_event_multiplexer as event_multiplexer,\n)\nfrom tensorboard.data import provider\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.core import core_plugin\nfrom tensorboard.util import test_util\n\nFAKE_INDEX_HTML = b\"<!doctype html><title>fake-index</title>\"\n\n\nclass FakeFlags(object):\n def __init__(\n self,\n bind_all=False,\n host=None,\n inspect=False,\n version_tb=False,\n logdir=\"\",\n logdir_spec=\"\",\n event_file=\"\",\n db=\"\",\n path_prefix=\"\",\n generic_data=\"true\",\n ):\n self.bind_all = bind_all\n self.host = host\n self.inspect = inspect\n self.version_tb = version_tb\n self.logdir = logdir\n self.logdir_spec = logdir_spec\n self.event_file = event_file\n self.db = db\n self.path_prefix = path_prefix\n self.generic_data = generic_data\n\n\nclass CorePluginFlagsTest(tf.test.TestCase):\n def testFlag(self):\n loader = core_plugin.CorePluginLoader()\n loader.fix_flags(FakeFlags(version_tb=True))\n loader.fix_flags(FakeFlags(inspect=True, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=True, event_file=\"/tmp/event.out\"))\n loader.fix_flags(FakeFlags(inspect=False, logdir=\"/tmp\"))\n loader.fix_flags(FakeFlags(inspect=False, db=\"sqlite:foo\"))\n # User can pass both, although the behavior is not clearly defined.\n loader.fix_flags(\n FakeFlags(inspect=False, logdir=\"/tmp\", db=\"sqlite:foo\")\n )\n\n logdir_or_db_req = r\"A logdir or db must be specified\"\n one_of_event_or_logdir_req = (\n r\"Must specify either --logdir.*but not both.$\"\n )\n event_or_logdir_req = r\"Must specify either --logdir or --event_file.$\"\n\n with six.assertRaisesRegex(self, ValueError, event_or_logdir_req):\n loader.fix_flags(FakeFlags(inspect=True))\n with six.assertRaisesRegex(\n self, ValueError, one_of_event_or_logdir_req\n ):\n loader.fix_flags(\n FakeFlags(\n inspect=True, logdir=\"/tmp\", event_file=\"/tmp/event.out\"\n )\n )\n with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):\n loader.fix_flags(FakeFlags(inspect=False))\n with six.assertRaisesRegex(self, ValueError, logdir_or_db_req):\n loader.fix_flags(\n FakeFlags(inspect=False, event_file=\"/tmp/event.out\")\n )\n\n def testPathPrefix_stripsTrailingSlashes(self):\n loader = 
core_plugin.CorePluginLoader()\n for path_prefix in (\"/hello\", \"/hello/\", \"/hello//\", \"/hello///\"):\n flag = FakeFlags(\n inspect=False, logdir=\"/tmp\", path_prefix=path_prefix\n )\n loader.fix_flags(flag)\n self.assertEqual(\n flag.path_prefix,\n \"/hello\",\n \"got %r (input %r)\" % (flag.path_prefix, path_prefix),\n )\n\n def testPathPrefix_mustStartWithSlash(self):\n loader = core_plugin.CorePluginLoader()\n flag = FakeFlags(inspect=False, logdir=\"/tmp\", path_prefix=\"noslash\")\n with self.assertRaises(base_plugin.FlagsError) as cm:\n loader.fix_flags(flag)\n msg = str(cm.exception)\n self.assertIn(\"must start with slash\", msg)\n self.assertIn(repr(\"noslash\"), msg)\n\n\nclass CorePluginNoDataTest(tf.test.TestCase):\n def setUp(self):\n super(CorePluginNoDataTest, self).setUp()\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.get_temp_dir(),\n multiplexer=event_multiplexer.EventMultiplexer(),\n window_title=\"title foo\",\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRoutesProvided(self):\n \"\"\"Tests that the plugin offers the correct routes.\"\"\"\n routes = self.plugin.get_plugin_apps()\n self.assertIsInstance(routes[\"/data/logdir\"], collections.abc.Callable)\n self.assertIsInstance(routes[\"/data/runs\"], collections.abc.Callable)\n\n def testIndex_returnsActualHtml(self):\n \"\"\"Test the format of the root / endpoint.\"\"\"\n response = self.server.get(\"/\")\n self.assertEqual(200, response.status_code)\n self.assertStartsWith(response.headers.get(\"Content-Type\"), \"text/html\")\n html = response.get_data()\n self.assertEqual(html, FAKE_INDEX_HTML)\n\n def testDataPaths_disableAllCaching(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n for path in (\"/data/runs\", \"/data/logdir\"):\n response = self.server.get(path)\n self.assertEqual(200, response.status_code, msg=path)\n self.assertEqual(\"0\", response.headers.get(\"Expires\"), msg=path)\n\n def testEnvironmentForWindowTitle(self):\n \"\"\"Test that the environment route correctly returns the window\n title.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"window_title\"], \"title foo\")\n\n def testEnvironmentForLogdir(self):\n \"\"\"Test that the environment route correctly returns the logdir.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], self.get_temp_dir())\n\n def testLogdir(self):\n \"\"\"Test the format of the data/logdir endpoint.\"\"\"\n parsed_object = self._get_json(self.server, \"/data/logdir\")\n self.assertEqual(parsed_object, {\"logdir\": self.get_temp_dir()})\n\n\nclass CorePluginExperimentMetadataTest(tf.test.TestCase):\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testGetEnvironmentDataWithExperimentMetadata(self):\n \"\"\"Test environment route returns correct metadata 
about experiment.\"\"\"\n\n class FakeDataProvider(object):\n def data_location(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return \"\"\n\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return provider.ExperimentMetadata(\n experiment_name=\"Experiment #5 (実験#5)\",\n experiment_description=\"Take five (😊)\",\n creation_time=1234.5,\n )\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertEqual(\n parsed_object[\"experiment_name\"], \"Experiment #5 (実験#5)\"\n )\n self.assertEqual(\n parsed_object[\"experiment_description\"], \"Take five (😊)\"\n )\n self.assertEqual(parsed_object[\"creation_time\"], 1234.5)\n\n def testGetEnvironmentDataWithNoExperimentMetadata(self):\n \"\"\"Test environment route works when no experiment metadata exists.\"\"\"\n\n class FakeDataProvider(object):\n def data_location(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return \"\"\n\n def experiment_metadata(self, ctx, *, experiment_id):\n del experiment_id # Unused.\n return None\n\n self.context = base_plugin.TBContext(\n flags=FakeFlags(generic_data=\"true\"),\n data_provider=FakeDataProvider(),\n )\n\n self.plugin = core_plugin.CorePlugin(self.context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n parsed_object = self._get_json(self.server, \"/data/environment\")\n self.assertEqual(parsed_object[\"data_location\"], \"\")\n self.assertEqual(parsed_object[\"window_title\"], None)\n self.assertNotIn(\"experiment_name\", parsed_object)\n self.assertNotIn(\"experiment_description\", parsed_object)\n self.assertNotIn(\"creation_time\", parsed_object)\n\n\nclass CorePluginTestBase(object):\n def setUp(self):\n super(CorePluginTestBase, self).setUp()\n self.logdir = self.get_temp_dir()\n self.multiplexer = event_multiplexer.EventMultiplexer()\n context = base_plugin.TBContext(\n assets_zip_provider=get_test_assets_zip_provider(),\n logdir=self.logdir,\n multiplexer=self.multiplexer,\n )\n self.plugin = core_plugin.CorePlugin(context)\n app = application.TensorBoardWSGI([self.plugin])\n self.server = werkzeug_test.Client(app, wrappers.BaseResponse)\n\n def create_multiplexer(self):\n raise NotImplementedError()\n\n def _add_run(self, run_name):\n run_path = os.path.join(self.logdir, run_name)\n with test_util.FileWriter(run_path) as writer:\n writer.add_test_summary(\"foo\")\n self.multiplexer.AddRunsFromDirectory(self.logdir)\n self.multiplexer.Reload()\n\n def _get_json(self, server, path):\n response = server.get(path)\n self.assertEqual(200, response.status_code)\n self.assertEqual(\n \"application/json\", response.headers.get(\"Content-Type\")\n )\n return json.loads(response.get_data().decode(\"utf-8\"))\n\n def testRuns(self):\n \"\"\"Test the format of the /data/runs endpoint.\"\"\"\n self._add_run(\"run1\")\n run_json = self._get_json(self.server, \"/data/runs\")\n self.assertEqual(run_json, [\"run1\"])\n\n def testRunsAppendOnly(self):\n \"\"\"Test that new runs appear after old ones in /data/runs.\"\"\"\n fake_wall_times = {\n 
\"run1\": 1234.0,\n \"avocado\": 2345.0,\n \"zebra\": 3456.0,\n \"ox\": 4567.0,\n \"mysterious\": None,\n \"enigmatic\": None,\n }\n\n def FirstEventTimestamp_stub(run_name):\n matches = [\n candidate_name\n for candidate_name in fake_wall_times\n if run_name.endswith(candidate_name)\n ]\n self.assertEqual(len(matches), 1, \"%s (%s)\" % (matches, run_name))\n wall_time = fake_wall_times[matches[0]]\n if wall_time is None:\n raise ValueError(\"No event timestamp could be found\")\n else:\n return wall_time\n\n with mock.patch.object(\n self.multiplexer, \"FirstEventTimestamp\"\n ) as mock_first_event_timestamp:\n mock_first_event_timestamp.side_effect = FirstEventTimestamp_stub\n # Start with a single run.\n self._add_run(\"run1\")\n\n # Add one run: it should come last.\n self._add_run(\"avocado\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"), [\"run1\", \"avocado\"],\n )\n\n # Add another run: it should come last, too.\n self._add_run(\"zebra\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\"],\n )\n\n # And maybe there's a run for which we somehow have no timestamp.\n self._add_run(\"mysterious\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"mysterious\"],\n )\n\n # Add another timestamped run: it should come before the timestamp-less one.\n self._add_run(\"ox\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"mysterious\"],\n )\n\n # Add another timestamp-less run, lexicographically before the other one:\n # it should come after all timestamped runs but first among timestamp-less.\n self._add_run(\"enigmatic\")\n self.assertEqual(\n self._get_json(self.server, \"/data/runs\"),\n [\"run1\", \"avocado\", \"zebra\", \"ox\", \"enigmatic\", \"mysterious\"],\n )\n\n\ndef get_test_assets_zip_provider():\n memfile = six.BytesIO()\n with zipfile.ZipFile(\n memfile, mode=\"w\", compression=zipfile.ZIP_DEFLATED\n ) as zf:\n zf.writestr(\"index.html\", FAKE_INDEX_HTML)\n return lambda: contextlib.closing(six.BytesIO(memfile.getvalue()))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
madcpt/MachineWontLie
[ "992156f3916bafeaa01a3685eae285550391132e" ]
[ "models/PCA.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.utils.data import DataLoader\nfrom overrides import overrides\nimport numpy as np\nimport time\n\nfrom models.BaseModel import BaseModel\n\n\nclass PCAModel(BaseModel):\n def __init__(self, configs: object):\n super().__init__(configs.model.model_name, configs.device)\n from sklearn.decomposition import PCA\n self.pca_cls = PCA(n_components=30)\n\n from sklearn.svm import SVC\n self.svm_cls = SVC(kernel=\"rbf\", probability=True, )\n\n @overrides\n def train_epoch(self, epoch_num: int, train_loader: DataLoader):\n x = torch.flatten(train_loader.dataset.data, 1).numpy()\n y = train_loader.dataset.targets.numpy()\n self.pca_cls.fit(x, y)\n x_pca = self.pca_cls.transform(x)\n # print(x_pca.shape)\n self.svm_cls.fit(x_pca, y)\n\n @overrides\n def test_epoch(self, epoch_num: int, test_loader: DataLoader):\n x = torch.flatten(test_loader.dataset.data, 1).numpy()\n y = test_loader.dataset.targets.numpy()\n pca_result: np.ndarray = self.pca_cls.transform(x)\n predict_score = self.svm_cls.predict(pca_result)\n predict_result = predict_score\n # predict_result = np.argmax(predict_score,axis=1)\n # print(x.shape, predict_score.shape, predict_result.shape, y.shape)\n results: np.ndarray = predict_result == y\n return sum(results) / len(results)\n\n @overrides\n def run_epochs(self, epochs: int, train_loader: DataLoader, test_loader: DataLoader):\n t1 = time.time()\n self.train_epoch(0, train_loader)\n t2 = time.time()\n acc = self.test_epoch(0, test_loader)\n if self.writer:\n self.writer.add_scalar('test_acc', acc, 0)\n print(acc, t2 - t1, time.time() - t2)\n" ]
[ [ "sklearn.decomposition.PCA", "sklearn.svm.SVC", "torch.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thentgesMindee/doctr
[ "f97e92ba1b7bcb785a60f2cf549f13f88e510609", "f97e92ba1b7bcb785a60f2cf549f13f88e510609", "f97e92ba1b7bcb785a60f2cf549f13f88e510609", "f97e92ba1b7bcb785a60f2cf549f13f88e510609" ]
[ "doctr/models/recognition/predictor/pytorch.py", "doctr/models/backbones/resnet/tensorflow.py", "references/recognition/train_tensorflow.py", "doctr/io/image/base.py" ]
[ "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Any, List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom doctr.models.preprocessor import PreProcessor\n\nfrom ._utils import remap_preds, split_crops\n\n__all__ = ['RecognitionPredictor']\n\n\nclass RecognitionPredictor(nn.Module):\n \"\"\"Implements an object able to identify character sequences in images\n\n Args:\n pre_processor: transform inputs for easier batched model inference\n model: core detection architecture\n split_wide_crops: wether to use crop splitting for high aspect ratio crops\n \"\"\"\n\n def __init__(\n self,\n pre_processor: PreProcessor,\n model: nn.Module,\n split_wide_crops: bool = True,\n ) -> None:\n\n super().__init__()\n self.pre_processor = pre_processor\n self.model = model.eval()\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n self.dil_factor = 1.4 # Dilation factor to overlap the crops\n self.target_ar = 4 # Target aspect ratio\n\n @torch.no_grad()\n def forward(\n self,\n crops: List[Union[np.ndarray, torch.Tensor]],\n **kwargs: Any,\n ) -> List[Tuple[str, float]]:\n\n if len(crops) == 0:\n return []\n # Dimension check\n if any(crop.ndim != 3 for crop in crops):\n raise ValueError(\"incorrect input shape: all crops are expected to be multi-channel 2D images.\")\n\n # Split crops that are too wide\n remapped = False\n if self.split_wide_crops:\n new_crops, crop_map, remapped = split_crops(\n crops,\n self.critical_ar,\n self.target_ar,\n self.dil_factor,\n isinstance(crops[0], np.ndarray)\n )\n if remapped:\n crops = new_crops\n\n # Resize & batch them\n processed_batches = self.pre_processor(crops)\n\n # Forward it\n raw = [\n self.model(batch, return_preds=True, **kwargs)['preds'] # type: ignore[operator]\n for batch in processed_batches\n ]\n\n # Process outputs\n out = [charseq for batch in raw for charseq in batch]\n\n # Remap crops\n if self.split_wide_crops and remapped:\n out = remap_preds(out, crop_map, self.dil_factor)\n\n return out\n", "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\n\nfrom ...utils import conv_sequence, load_pretrained_params\n\n__all__ = ['ResNet', 'resnet31', 'ResnetStage']\n\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'resnet31': {'num_blocks': (1, 2, 5, 3), 'output_channels': (256, 256, 512, 512),\n 'conv_seq': (True, True, True, True), 'pooling': ((2, 2), (2, 1), None, None),\n 'url': None},\n}\n\n\nclass ResnetBlock(layers.Layer):\n\n \"\"\"Implements a resnet31 block with shortcut\n\n Args:\n conv_shortcut: Use of shortcut\n output_channels: number of channels to use in Conv2D\n kernel_size: size of square kernels\n strides: strides to use in the first convolution of the block\n \"\"\"\n def __init__(\n self,\n output_channels: int,\n conv_shortcut: bool,\n strides: int = 1,\n **kwargs\n ) -> None:\n\n super().__init__(**kwargs)\n if conv_shortcut:\n self.shortcut = Sequential(\n [\n layers.Conv2D(\n filters=output_channels,\n strides=strides,\n padding='same',\n kernel_size=1,\n use_bias=False,\n 
kernel_initializer='he_normal'\n ),\n layers.BatchNormalization()\n ]\n )\n else:\n self.shortcut = layers.Lambda(lambda x: x)\n self.conv_block = Sequential(\n self.conv_resnetblock(output_channels, 3, strides)\n )\n self.act = layers.Activation('relu')\n\n @staticmethod\n def conv_resnetblock(\n output_channels: int,\n kernel_size: int,\n strides: int = 1,\n ) -> List[layers.Layer]:\n return [\n *conv_sequence(output_channels, activation='relu', bn=True, strides=strides, kernel_size=kernel_size),\n layers.Conv2D(output_channels, kernel_size, padding='same', use_bias=False, kernel_initializer='he_normal'),\n layers.BatchNormalization(),\n ]\n\n def call(\n self,\n inputs: tf.Tensor\n ) -> tf.Tensor:\n clone = self.shortcut(inputs)\n conv_out = self.conv_block(inputs)\n out = self.act(clone + conv_out)\n\n return out\n\n\nclass ResnetStage(Sequential):\n\n \"\"\"Implements a resnet31 stage\n\n Args:\n num_blocks: number of blocks inside the stage\n output_channels: number of channels to use in Conv2D\n downsample: if true, performs a /2 downsampling at the first block of the stage\n \"\"\"\n def __init__(\n self,\n num_blocks: int,\n output_channels: int,\n downsample: bool = False,\n ) -> None:\n\n super().__init__()\n final_blocks = [\n ResnetBlock(output_channels, conv_shortcut=False) for _ in range(1, num_blocks)\n ]\n if downsample is True:\n self.add(ResnetBlock(output_channels, conv_shortcut=True, strides=2))\n else:\n self.add(ResnetBlock(output_channels, conv_shortcut=True))\n for final_block in final_blocks:\n self.add(final_block)\n\n\nclass ResNet(Sequential):\n\n \"\"\"Resnet class with two convolutions and a maxpooling before the first stage\n\n Args:\n num_blocks: number of resnet block in each stage\n output_channels: number of channels in each stage\n conv_seq: wether to add a conv_sequence after each stage\n pooling: pooling to add after each stage (if None, no pooling)\n input_shape: shape of inputs\n include_top: whether the classifier head should be instantiated\n \"\"\"\n\n def __init__(\n self,\n num_blocks: Tuple[int, int, int, int],\n output_channels: Tuple[int, int, int, int],\n conv_seq: Tuple[bool, bool, bool, bool],\n pooling: Tuple[\n Optional[Tuple[int, int]],\n Optional[Tuple[int, int]],\n Optional[Tuple[int, int]],\n Optional[Tuple[int, int]]\n ],\n input_shape: Tuple[int, int, int] = (640, 640, 3),\n include_top: bool = False,\n ) -> None:\n\n _layers = [\n *conv_sequence(out_channels=64, activation='relu', bn=True, kernel_size=3, input_shape=input_shape),\n *conv_sequence(out_channels=128, activation='relu', bn=True, kernel_size=3),\n layers.MaxPool2D(pool_size=2, strides=2, padding='valid'),\n ]\n for n_blocks, out_channels, conv, pool in zip(num_blocks, output_channels, conv_seq, pooling):\n _layers.append(ResnetStage(n_blocks, out_channels))\n if conv:\n _layers.extend(conv_sequence(out_channels, activation='relu', bn=True, kernel_size=3))\n if pool:\n _layers.append(layers.MaxPool2D(pool_size=pool, strides=pool, padding='valid'))\n super().__init__(_layers)\n\n\ndef _resnet(arch: str, pretrained: bool, **kwargs: Any) -> ResNet:\n\n # Build the model\n model = ResNet(\n default_cfgs[arch]['num_blocks'],\n default_cfgs[arch]['output_channels'],\n default_cfgs[arch]['conv_seq'],\n default_cfgs[arch]['pooling'],\n **kwargs\n )\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, default_cfgs[arch]['url'])\n\n return model\n\n\ndef resnet31(pretrained: bool = False, **kwargs: Any) -> ResNet:\n \"\"\"Resnet31 architecture with 
rectangular pooling windows as described in\n `\"Show, Attend and Read:A Simple and Strong Baseline for Irregular Text Recognition\",\n <https://arxiv.org/pdf/1811.00751.pdf>`_. Downsizing: (H, W) --> (H/8, W/4)\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import resnet31\n >>> model = resnet31(pretrained=False)\n >>> input_tensor = tf.random.uniform(shape=[1, 224, 224, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained: boolean, True if model is pretrained\n\n Returns:\n A resnet31 model\n \"\"\"\n\n return _resnet('resnet31', pretrained, **kwargs)\n", "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\n\nos.environ['USE_TF'] = '1'\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport datetime\nimport hashlib\nimport time\nfrom collections import deque\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\nfrom fastprogress.fastprogress import master_bar, progress_bar\n\nimport wandb\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom utils import plot_samples\n\nfrom doctr import transforms as T\nfrom doctr.datasets import VOCABS, DataLoader, RecognitionDataset\nfrom doctr.models import recognition\nfrom doctr.utils.metrics import TextMatch\n\n\ndef fit_one_epoch(model, train_loader, batch_transforms, optimizer, loss_q, mb, step, tb_writer=None):\n train_iter = iter(train_loader)\n # Iterate over the batches of the dataset\n for batch_step in progress_bar(range(train_loader.num_batches), parent=mb):\n images, targets = next(train_iter)\n\n images = batch_transforms(images)\n\n with tf.GradientTape() as tape:\n train_loss = model(images, targets, training=True)['loss']\n grads = tape.gradient(train_loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n mb.child.comment = f'Training loss: {train_loss.numpy().mean():.6}'\n # Update steps\n step.assign_add(args.batch_size)\n # Add loss to queue\n loss_q.append(np.mean(train_loss))\n # Log loss and save weights every 100 batch step\n if batch_step % 100 == 0:\n # Compute loss\n loss = sum(loss_q) / len(loss_q)\n if tb_writer is not None:\n with tb_writer.as_default():\n tf.summary.scalar('train_loss', loss, step=step)\n\n\ndef evaluate(model, val_loader, batch_transforms, val_metric):\n # Reset val metric\n val_metric.reset()\n # Validation loop\n val_loss, batch_cnt = 0, 0\n val_iter = iter(val_loader)\n for images, targets in val_iter:\n images = batch_transforms(images)\n out = model(images, targets, return_preds=True, training=False)\n # Compute metric\n if len(out['preds']):\n words, _ = zip(*out['preds'])\n else:\n words = []\n val_metric.update(targets, words)\n\n val_loss += out['loss'].numpy().mean()\n batch_cnt += 1\n\n val_loss /= batch_cnt\n result = val_metric.summary()\n return val_loss, result['raw'], result['unicase']\n\n\ndef main(args):\n\n print(args)\n\n # Load val data generator\n st = time.time()\n val_set = RecognitionDataset(\n img_folder=os.path.join(args.val_path, 'images'),\n labels_path=os.path.join(args.val_path, 'labels.json'),\n sample_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),\n )\n val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, drop_last=False, 
workers=args.workers)\n print(f\"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in \"\n f\"{val_loader.num_batches} batches)\")\n with open(os.path.join(args.val_path, 'labels.json'), 'rb') as f:\n val_hash = hashlib.sha256(f.read()).hexdigest()\n\n # Load doctr model\n model = recognition.__dict__[args.arch](\n pretrained=args.pretrained,\n input_shape=(args.input_size, 4 * args.input_size, 3),\n vocab=VOCABS[args.vocab]\n )\n # Resume weights\n if isinstance(args.resume, str):\n model.load_weights(args.resume)\n\n # Tf variable to log steps\n step = tf.Variable(0, dtype=\"int64\")\n\n # Metrics\n val_metric = TextMatch()\n\n batch_transforms = T.Compose([\n T.Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301)),\n ])\n\n if args.test_only:\n print(\"Running evaluation\")\n val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)\n print(f\"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})\")\n return\n\n st = time.time()\n\n # Load train data generator\n base_path = Path(args.train_path)\n parts = [base_path] if base_path.joinpath('labels.json').is_file() else [\n base_path.joinpath(sub) for sub in os.listdir(base_path)\n ]\n train_set = RecognitionDataset(\n parts[0].joinpath('images'),\n parts[0].joinpath('labels.json'),\n sample_transforms=T.Compose([\n T.RandomApply(T.ColorInversion(), .1),\n T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),\n # Augmentations\n T.RandomJpegQuality(60),\n T.RandomSaturation(.3),\n T.RandomContrast(.3),\n T.RandomBrightness(.3),\n ]),\n )\n\n if len(parts) > 1:\n for subfolder in parts[1:]:\n train_set.merge_dataset(RecognitionDataset(subfolder.joinpath('images'), subfolder.joinpath('labels.json')))\n\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, workers=args.workers)\n print(f\"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in \"\n f\"{train_loader.num_batches} batches)\")\n with open(parts[0].joinpath('labels.json'), 'rb') as f:\n train_hash = hashlib.sha256(f.read()).hexdigest()\n\n if args.show_samples:\n x, target = next(iter(train_loader))\n plot_samples(x, target)\n return\n\n # Optimizer\n scheduler = tf.keras.optimizers.schedules.ExponentialDecay(\n args.lr,\n decay_steps=args.epochs * len(train_loader),\n decay_rate=1 / (25e4), # final lr as a fraction of initial lr\n staircase=False\n )\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=scheduler,\n beta_1=0.95,\n beta_2=0.99,\n epsilon=1e-6,\n clipnorm=5\n )\n\n # Tensorboard to monitor training\n current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n exp_name = f\"{args.arch}_{current_time}\" if args.name is None else args.name\n\n # Tensorboard\n tb_writer = None\n if args.tb:\n log_dir = Path('logs', exp_name)\n log_dir.mkdir(parents=True, exist_ok=True)\n tb_writer = tf.summary.create_file_writer(str(log_dir))\n\n # W&B\n if args.wb:\n\n run = wandb.init(\n name=exp_name,\n project=\"text-recognition\",\n config={\n \"learning_rate\": args.lr,\n \"epochs\": args.epochs,\n \"weight_decay\": args.weight_decay,\n \"batch_size\": args.batch_size,\n \"architecture\": args.arch,\n \"input_size\": args.input_size,\n \"optimizer\": \"adam\",\n \"framework\": \"tensorflow\",\n \"scheduler\": args.sched,\n \"vocab\": args.vocab,\n \"train_hash\": train_hash,\n \"val_hash\": val_hash,\n \"pretrained\": args.pretrained,\n }\n )\n\n # Create loss queue\n loss_q = 
deque(maxlen=100)\n min_loss = np.inf\n\n # Training loop\n mb = master_bar(range(args.epochs))\n for epoch in mb:\n fit_one_epoch(model, train_loader, batch_transforms, optimizer, loss_q, mb, step, tb_writer)\n\n # Validation loop at the end of each epoch\n val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)\n if val_loss < min_loss:\n print(f\"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...\")\n model.save_weights(f'./{exp_name}/weights')\n min_loss = val_loss\n mb.write(f\"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} \"\n f\"(Exact: {exact_match:.2%} | Partial: {partial_match:.2%})\")\n # Tensorboard\n if args.tb:\n with tb_writer.as_default():\n tf.summary.scalar('val_loss', val_loss, step=step)\n tf.summary.scalar('exact_match', exact_match, step=step)\n tf.summary.scalar('partial_match', partial_match, step=step)\n # W&B\n if args.wb:\n wandb.log({\n 'val_loss': val_loss,\n 'exact_match': exact_match,\n 'partial_match': partial_match,\n })\n #reset val metric\n val_metric.reset()\n\n if args.wb:\n run.finish()\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser(description='DocTR training script for text recognition (TensorFlow)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('train_path', type=str, help='path to train data folder(s)')\n parser.add_argument('val_path', type=str, help='path to val data folder')\n parser.add_argument('arch', type=str, help='text-recognition model to train')\n parser.add_argument('--name', type=str, default=None, help='Name of your training experiment')\n parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train the model on')\n parser.add_argument('-b', '--batch_size', type=int, default=64, help='batch size for training')\n parser.add_argument('--input_size', type=int, default=32, help='input size H for the model, W = 4*H')\n parser.add_argument('--lr', type=float, default=0.001, help='learning rate for the optimizer (Adam)')\n parser.add_argument('-j', '--workers', type=int, default=4, help='number of workers used for dataloading')\n parser.add_argument('--resume', type=str, default=None, help='Path to your checkpoint')\n parser.add_argument('--vocab', type=str, default=\"french\", help='Vocab to be used for training')\n parser.add_argument(\"--test-only\", dest='test_only', action='store_true', help=\"Run the validation loop\")\n parser.add_argument('--show-samples', dest='show_samples', action='store_true',\n help='Display unormalized training samples')\n parser.add_argument('--tb', dest='tb', action='store_true',\n help='Log to Tensorboard')\n parser.add_argument('--wb', dest='wb', action='store_true',\n help='Log to Weights & Biases')\n parser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='Load pretrained parameters before starting the training')\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n", "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom doctr.utils.common_types import AbstractFile\n\n__all__ = ['read_img_as_numpy']\n\n\ndef read_img_as_numpy(\n file: AbstractFile,\n output_size: Optional[Tuple[int, int]] = None,\n 
rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Read an image file into numpy format\n\n Example::\n >>> from doctr.documents import read_img\n >>> page = read_img(\"path/to/your/doc.jpg\")\n\n Args:\n file: the path to the image file\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n Returns:\n the page decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n if isinstance(file, (str, Path)):\n if not Path(file).is_file():\n raise FileNotFoundError(f\"unable to access {file}\")\n img = cv2.imread(str(file), cv2.IMREAD_COLOR)\n elif isinstance(file, bytes):\n file = np.frombuffer(file, np.uint8)\n img = cv2.imdecode(file, cv2.IMREAD_COLOR)\n else:\n raise TypeError(\"unsupported object type for argument 'file'\")\n\n # Validity check\n if img is None:\n raise ValueError(\"unable to read file.\")\n # Resizing\n if isinstance(output_size, tuple):\n img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n" ]
[ [ "torch.no_grad" ], [ "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Lambda", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.keras.layers.BatchNormalization" ], [ "tensorflow.Variable", "tensorflow.config.experimental.set_memory_growth", "tensorflow.config.experimental.list_physical_devices", "tensorflow.keras.optimizers.Adam", "numpy.mean", "tensorflow.summary.scalar", "tensorflow.GradientTape" ], [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
goodcq/CommPy
[ "af3a9acba32d2f9c6b723705f709fee2cb9352e2" ]
[ "commpy/tests/test_channels.py" ]
[ "# Authors: CommPy contributors\n# License: BSD 3-Clause\n\nfrom __future__ import division, print_function # Python 2 compatibility\n\nfrom math import cos\n\nfrom numpy import ones, inf, sqrt, array, identity, zeros, dot, trace, einsum, absolute, exp, pi, fromiter, kron, \\\n zeros_like, empty\nfrom numpy.random import seed, choice, randn\nfrom numpy.testing import run_module_suite, assert_raises, assert_equal, assert_allclose, \\\n assert_array_equal, dec\n\nfrom commpy.channels import SISOFlatChannel, MIMOFlatChannel\nfrom commpy.utilities import signal_power\n\n\nclass TestSISOFlatChannel:\n msg_length = 100000\n real_mods = array((-1, 1)), array((-3, 3))\n all_mods = array((-1, 1)), array((-3, 3)), \\\n array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))\n\n def test_default_args(self):\n def check(chan):\n assert_equal(chan.noises, None,\n err_msg='Default noises is not None')\n assert_equal(chan.channel_gains, None,\n err_msg='Default channel gains is not None')\n assert_equal(chan.unnoisy_output, None,\n err_msg='Default unnoisy output is not None')\n\n chan = SISOFlatChannel()\n\n # Test output state before any propagation\n check(chan)\n\n # Test that noise standard deviation must be set before propagation\n with assert_raises(AssertionError):\n chan.propagate(array((1, 1)))\n\n # Test output state before any propagation\n check(chan)\n\n assert_equal(chan.nb_rx, 1,\n err_msg='SISO channel as more than 1 Rx')\n assert_equal(chan.nb_tx, 1,\n err_msg='SISO channel as more than 1 Tx')\n\n def test_fading(self):\n # Set seed\n seed(17121996)\n\n def check_chan_gain(mod, chan):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg)\n P_unnoisy = signal_power(chan.unnoisy_output)\n\n assert_allclose(P_unnoisy, P_msg, rtol=0.2,\n err_msg='Channel add or remove energy')\n\n # Test value checking in constructor construction\n with assert_raises(ValueError):\n SISOFlatChannel(0, (1, 1))\n\n chan = SISOFlatChannel(0)\n\n # Test on real channel\n for mod in self.real_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (1, 1)\n\n # Test without fading\n chan.fading_param = (1, 0)\n check_chan_gain(mod, chan)\n assert_array_equal(chan.channel_gains, ones(self.msg_length),\n err_msg='Channel fading while fading is disabled')\n\n # Test with Rayleigh fading\n chan.fading_param = (0, 1)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), 0, atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test with rician fading\n chan.fading_param = (sqrt(2 / 3), 1 / 3)\n check_chan_gain(mod, chan)\n assert_allclose(chan.channel_gains.mean(), sqrt(2 / 3), atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1 / 3, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test on complex channel\n for mod in self.all_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (1, 1)\n\n # Test without fading\n chan.fading_param = (1 + 0j, 0)\n check_chan_gain(mod, chan)\n assert_array_equal(chan.channel_gains, ones(self.msg_length),\n err_msg='Channel fading while fading is disabled')\n\n # Test with Rayleigh fading\n chan.fading_param = (0j, 1)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), 
0, atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n # Test with rician fading\n chan.fading_param = (0.5 + 0.5j, 0.5)\n check_chan_gain(mod, chan)\n assert_allclose(absolute(chan.channel_gains.mean()), sqrt(0.5), atol=2e-2,\n err_msg='Wrong channel mean with real channel')\n assert_allclose(chan.channel_gains.var(), 0.5, atol=0.2,\n err_msg='Wrong channel variance with real channel')\n\n def test_noise_generation(self):\n # Set seed\n seed(17121996)\n\n def check_noise(mod, chan, corrected_SNR_lin):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy\n P_noise = signal_power(chan.noises)\n\n assert_allclose(absolute(chan.noises.mean()), 0., atol=5e-2,\n err_msg='Noise mean is not 0')\n if corrected_SNR_lin == inf:\n assert_allclose(P_noise, 0, atol=1e-2,\n err_msg='There is noise that should not be here')\n else:\n assert_allclose(P_msg / P_noise, corrected_SNR_lin, atol=0.2,\n err_msg='Wrong SNR')\n\n chan = SISOFlatChannel(fading_param=(1 + 0j, 0))\n for mod in self.all_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n chan = SISOFlatChannel(fading_param=(1, 0))\n for mod in self.real_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n def test_type_check(self):\n chan = SISOFlatChannel(0)\n with assert_raises(TypeError):\n chan.propagate(array((1, 1j)))\n\n def test_k_factor(self):\n # Real channel\n chan = SISOFlatChannel()\n assert_allclose(chan.k_factor, inf,\n err_msg='k-factor should be infinite without fading in SISO channels')\n chan.fading_param = 0, 1\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n chan.fading_param = sqrt(0.5), 0.5\n assert_allclose(chan.k_factor, 1,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n # Complex channel\n chan.fading_param = 1j, 0\n assert_allclose(chan.k_factor, inf,\n err_msg='k-factor should be infinite without fading in SISO channels')\n chan.fading_param = 0j, 1\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n chan.fading_param = 0.5 + 0.5j, 0.5\n assert_allclose(chan.k_factor, 1,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n\nclass MIMOTestCase(object):\n msg_length = 100000\n real_mods = array((-1, 1)), array((-3, 3))\n all_mods = array((-1, 1)), array((-3, 3)), \\\n array((-1 - 1j, -1 + 1j, 1 - 1j, 1 + 1j)), array((-3 - 3j, -3 + 3j, 3 - 3j, 3 + 3j))\n\n @staticmethod\n def random_SDP_matrix(n):\n G = randn(n, n)\n dot(G, G.T, G)\n return G / trace(G)\n\n def test_symetric(self):\n nb_tx = 8\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_more_rx(self):\n nb_tx = 4\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_more_tx(self):\n nb_tx = 8\n nb_rx = 4\n 
self.do(nb_tx, nb_rx)\n\n def test_SIMO(self):\n nb_tx = 1\n nb_rx = 8\n self.do(nb_tx, nb_rx)\n\n def test_MISO(self):\n nb_tx = 8\n nb_rx = 1\n self.do(nb_tx, nb_rx)\n\n def test_SISO(self):\n nb_tx = 1\n nb_rx = 1\n self.do(nb_tx, nb_rx)\n\n\nclass TestMIMODefaultArgs(MIMOTestCase):\n def __init__(self):\n super(TestMIMODefaultArgs, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n def check(chan):\n assert_equal(chan.noises, None,\n err_msg='Default noises is not None')\n assert_equal(chan.channel_gains, None,\n err_msg='Default channel gains is not None')\n assert_equal(chan.unnoisy_output, None,\n err_msg='Default unnoisy output is not None')\n\n chan = MIMOFlatChannel(nb_tx, nb_rx)\n\n # Test output state before any propagation\n check(chan)\n\n # Test that noise standard deviation must be set before propagation\n with assert_raises(AssertionError):\n chan.propagate(array((1, 1)))\n\n # Test output state before any propagation\n check(chan)\n\n\[email protected]\nclass TestMIMOFading(MIMOTestCase):\n def __init__(self):\n super(TestMIMOFading, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n def check_chan_gain(mod, chan):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg)\n P_unnoisy = signal_power(chan.unnoisy_output)\n\n assert_allclose(P_unnoisy, P_msg * chan.nb_tx, rtol=0.2,\n err_msg='Channel add or remove energy')\n\n def expo_correlation(t, r):\n # Construct the exponent matrix\n expo_tx = fromiter((j - i for i in range(chan.nb_tx) for j in range(chan.nb_tx)), int, chan.nb_tx ** 2)\n expo_rx = fromiter((j - i for i in range(chan.nb_rx) for j in range(chan.nb_rx)), int, chan.nb_rx ** 2)\n\n # Reshape\n expo_tx = expo_tx.reshape(chan.nb_tx, chan.nb_tx)\n expo_rx = expo_rx.reshape(chan.nb_rx, chan.nb_rx)\n\n return t ** expo_tx, r ** expo_rx\n\n def check_correlation(chan, Rt, Rr):\n nb_ant = chan.nb_tx * chan.nb_rx\n Rdes = kron(Rt, Rr)\n H = chan.channel_gains\n Ract = zeros_like(Rdes)\n for i in range(len(H)):\n Ract += H[i].T.reshape(nb_ant, 1).dot(H[i].T.reshape(1, nb_ant).conj())\n Ract /= len(H)\n assert_allclose(Rdes, Ract, atol=0.05,\n err_msg='Wrong correlation matrix')\n\n # Test value checking in constructor construction\n with assert_raises(ValueError):\n MIMOFlatChannel(nb_tx, nb_tx, 0, (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx))))\n\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n prod_nb = nb_tx * nb_rx\n\n # Test on real channel\n for mod in self.real_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))\n\n # Test with Rayleigh fading\n chan.fading_param = (zeros((nb_rx, nb_tx)), identity(nb_tx), identity(nb_rx))\n check_chan_gain(mod, chan)\n\n # Test with rician fading\n mean = randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rt, Rr)\n check_chan_gain(mod, chan)\n\n # Test helper functions\n chan.uncorr_rayleigh_fading(float)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with uncorrelated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx)\n chan.uncorr_rician_fading(mean, 10)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with uncorrelated rician fading')\n\n # Test on 
complex channel\n for mod in self.all_mods:\n # Test value checking after construction\n with assert_raises(ValueError):\n chan.fading_param = (ones((nb_tx, nb_tx)), ones((nb_tx, nb_tx)), ones((nb_rx, nb_rx)))\n\n # Test with Rayleigh fading\n chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))\n check_chan_gain(mod, chan)\n assert_allclose(chan.channel_gains.mean(), 0, atol=1e-2,\n err_msg='Wrong channel mean with complex channel')\n assert_allclose(chan.channel_gains.var(), 1, atol=5e-2,\n err_msg='Wrong channel variance with complex channel')\n\n # Test with rician fading\n mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rt = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rt, Rr)\n check_chan_gain(mod, chan)\n\n assert_allclose(chan.channel_gains.mean(0).real, mean.real, atol=0.1,\n err_msg='Wrong channel mean with complex channel')\n assert_allclose(chan.channel_gains.mean(0).imag, mean.imag, atol=0.1,\n err_msg='Wrong channel mean with complex channel')\n\n # Test helper functions\n chan.uncorr_rayleigh_fading(complex)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with uncorrelated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.uncorr_rician_fading(mean, 10)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with uncorrelated rician fading')\n\n chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi))\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with correlated Rayleigh fading')\n Rt, Rr = expo_correlation(exp(-0.2j * pi), exp(-0.1j * pi))\n check_correlation(chan, Rt, Rr)\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.expo_corr_rician_fading(mean, 10, exp(-0.1j * pi), exp(-0.2j * pi))\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 10,\n err_msg='Wrong k-factor with correlated rician fading')\n\n # Test with beta > 0\n chan.expo_corr_rayleigh_fading(exp(-0.2j * pi), exp(-0.1j * pi), 1, 0.5)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 0,\n err_msg='Wrong k-factor with correlated Rayleigh fading')\n\n mean = randn(nb_rx, nb_tx) + randn(nb_rx, nb_tx) * 1j\n chan.expo_corr_rician_fading(mean, 5, exp(-0.1j * pi), exp(-0.2j * pi), 3, 2)\n check_chan_gain(mod, chan)\n assert_allclose(chan.k_factor, 5,\n err_msg='Wrong k-factor with correlated rician fading')\n\n\nclass TestMIMOSpectular(MIMOTestCase):\n def __init__(self):\n super(TestMIMOSpectular, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n\n # Test raising of ValueError\n with assert_raises(ValueError):\n chan.specular_compo(0, -1, 0, 1)\n with assert_raises(ValueError):\n chan.specular_compo(0, 1, 0, -1)\n\n # Test the result\n desired = empty((nb_rx, nb_tx), dtype=complex)\n for n in range(nb_rx):\n for m in range(nb_tx):\n desired[n, m] = exp(1j * 2 * pi * (n * 1 * cos(0.5) - m * 0.1 * cos(2)))\n assert_allclose(chan.specular_compo(2, 0.1, 0.5, 1), desired, rtol=0.02,\n err_msg='Wrong specular component')\n\n\[email protected]\nclass TestMIMONoiseGeneration(MIMOTestCase):\n def __init__(self):\n super(TestMIMONoiseGeneration, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n def check_noise(mod, chan, 
corrected_SNR_lin):\n msg = choice(mod, self.msg_length)\n chan.propagate(msg)\n\n P_msg = signal_power(msg) # previous test asserted that channel neither add nor remove energy\n P_noise = signal_power(chan.noises)\n\n assert_allclose(abs(chan.noises.mean()), 0., atol=0.5,\n err_msg='Noise mean is not 0')\n if corrected_SNR_lin == inf:\n assert_allclose(P_noise, 0, atol=1e-2,\n err_msg='There is noise that should not be here')\n else:\n assert_allclose(chan.nb_tx * P_msg / P_noise, corrected_SNR_lin, atol=0.2,\n err_msg='Wrong SNR')\n\n fading_param = zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx)\n chan = MIMOFlatChannel(nb_tx, nb_rx, fading_param=fading_param)\n for mod in self.all_mods:\n chan.noise_std = 0\n check_noise(mod, chan, inf)\n chan.set_SNR_lin(6, Es=signal_power(mod))\n check_noise(mod, chan, 6)\n chan.set_SNR_lin(6, .5, signal_power(mod))\n check_noise(mod, chan, 3)\n chan.set_SNR_dB(0, Es=signal_power(mod))\n check_noise(mod, chan, 1)\n chan.set_SNR_dB(0, .5, signal_power(mod))\n check_noise(mod, chan, .5)\n\n\nclass TestMIMOTypeCheck(MIMOTestCase):\n def __init__(self):\n super(TestMIMOTypeCheck, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n with assert_raises(TypeError):\n chan.propagate(array((1, 1j)))\n\n\nclass TestMIMOShapes(MIMOTestCase):\n def __init__(self):\n super(TestMIMOShapes, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Without padding\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n out = chan.propagate(ones(nb_tx * 2))\n assert_array_equal(chan.channel_gains.shape, (2, nb_rx, nb_tx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(chan.noises.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(chan.unnoisy_output.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n assert_array_equal(out.shape, (2, nb_rx),\n err_msg='Wrong channel shape without padding')\n\n # With padding\n chan = MIMOFlatChannel(nb_tx, nb_rx, 0)\n out = chan.propagate(ones(nb_tx * 2 + 1))\n assert_array_equal(chan.channel_gains.shape, (3, nb_rx, nb_tx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(chan.noises.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(chan.unnoisy_output.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n assert_array_equal(out.shape, (3, nb_rx),\n err_msg='Wrong channel shape with padding')\n\n\nclass TestMIMOkFactor(MIMOTestCase):\n def __init__(self):\n super(TestMIMOkFactor, self).__init__()\n\n def do(self, nb_tx, nb_rx):\n # Set seed\n seed(17121996)\n\n prod_nb = nb_tx * nb_rx\n\n # Real channel\n chan = MIMOFlatChannel(nb_tx, nb_rx)\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n mean = randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), absolute(mean)))\n Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = mean, Rs, Rr\n assert_allclose(chan.k_factor, 3,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n # Complex channel\n chan.fading_param = (zeros((nb_rx, nb_tx), complex), identity(nb_tx), identity(nb_rx))\n assert_allclose(chan.k_factor, 0,\n err_msg='k-factor should be 0 with Rayleigh fading in SISO channels')\n mean = randn(nb_rx, nb_tx) + 1j * randn(nb_rx, nb_tx)\n mean *= sqrt(prod_nb * 0.75 / einsum('ij,ij->', absolute(mean), 
absolute(mean)))\n Rs = self.random_SDP_matrix(nb_tx) * sqrt(prod_nb) * 0.5\n Rr = self.random_SDP_matrix(nb_rx) * sqrt(prod_nb) * 0.5\n chan.fading_param = (mean, Rs, Rr)\n assert_allclose(chan.k_factor, 3,\n err_msg='Wrong k-factor with rician fading in SISO channels')\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.kron", "numpy.random.randn", "numpy.zeros_like", "numpy.exp", "numpy.trace", "numpy.testing.assert_equal", "numpy.zeros", "numpy.random.choice", "numpy.testing.assert_raises", "numpy.identity", "numpy.testing.assert_allclose", "numpy.array", "numpy.testing.run_module_suite", "numpy.absolute", "numpy.random.seed", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hovey/py3DViewer
[ "7ae1697aa4860430d0d94b854f8b1f2a4b2d895f" ]
[ "Py3DViewer/structures/Trimesh.py" ]
[ "from .Abstractmesh import AbstractMesh\nimport numpy as np\nfrom ..utils import IO, ObservableArray, deprecated, utilities\nfrom ..utils.load_operations import get_connectivity_info_surface as get_connectivity_info \nfrom ..utils.load_operations import compute_vertex_normals, compute_face_normals\nfrom ..utils.load_operations import _compute_three_vertex_normals as compute_three_normals\nfrom ..utils.metrics import triangle_aspect_ratio, triangle_area\n\n\nclass Trimesh(AbstractMesh):\n \"\"\"\n This class represents a mesh composed of triangles. It is possible to load the mesh from a file or\n from raw geometry and topology data.\n\n Parameters:\n\n filename (string): The name of the file to load \n vertices (Array (Nx3) type=float): The list of vertices of the mesh\n polys (Array (Nx3) type=int): The list of polygons of the mesh\n labels (Array (Nx1) type=int): The list of labels of the mesh (Optional)\n\n\n \"\"\"\n\n def __init__(self, filename=None, vertices=None, polys=None, labels=None, texture=None, mtl=None, smoothness=False):\n\n super(Trimesh, self).__init__()\n \n self.vtx_normals = None # npArray (Nx3)\n self.poly_normals = None # npArray (Nx3)\n self.texture = texture\n self.material = {}\n self.groups = {}\n self.smoothness = smoothness\n\n self.__map_poly_indices = []\n\n\n if mtl is not None:\n self.__load_from_file(mtl)\n\n if filename is not None:\n self.__load_from_file(filename)\n self._AbstractMesh__filename = filename.split('/')[-1]\n\n elif vertices is not None and polys is not None:\n\n vertices = np.array(vertices)\n polys = np.array(polys)\n self.vertices = ObservableArray(vertices.shape)\n self.vertices[:] = vertices\n self.vertices.attach(self)\n self._AbstractMesh__polys = ObservableArray(polys.shape, dtype=np.int64)\n self._AbstractMesh__polys[:] = polys\n self._AbstractMesh__polys.attach(self)\n self.__load_operations()\n\n if labels is not None:\n labels = np.array(labels)\n assert(labels.shape[0] == polys.shape[0])\n self.labels = ObservableArray(labels.shape, dtype=np.int)\n self.labels[:] = labels\n self.labels.attach(self)\n else:\n self.labels = ObservableArray(polys.shape[0], dtype=np.int)\n self.labels[:] = np.zeros(self.labels.shape, dtype=np.int)\n self.labels.attach(self)\n \n self._AbstractMesh__poly_size = 3\n self._AbstractMesh__finished_loading = True\n \n\n # ==================== METHODS ==================== # \n\n\n def __load_operations(self):\n self._dont_update = True\n self._AbstractMesh__boundary_needs_update = True\n self._AbstractMesh__simplex_centroids = None\n\n \n self._AbstractMesh__edges, \\\n self._AbstractMesh__adj_vtx2vtx, \\\n self._AbstractMesh__adj_vtx2edge, \\\n self._AbstractMesh__adj_vtx2poly, \\\n self._AbstractMesh__adj_edge2vtx, \\\n self._AbstractMesh__adj_edge2edge, \\\n self._AbstractMesh__adj_edge2poly, \\\n self._AbstractMesh__adj_poly2vtx, \\\n self._AbstractMesh__adj_poly2edge, \\\n self._AbstractMesh__adj_poly2poly = get_connectivity_info(self.num_vertices, self.polys)\n \n\n self._AbstractMesh__update_bounding_box()\n self.reset_clipping()\n self.poly_normals = compute_face_normals(self.vertices, self.polys)\n self.vtx_normals = compute_vertex_normals(self.poly_normals, self.adj_vtx2poly._NList__list)\n self.__compute_metrics()\n self._AbstractMesh__simplex_centroids = None\n\n self._dont_update = False\n self.update()\n\n def __load_from_file(self, filename):\n\n ext = filename.split('.')[-1]\n\n if ext == 'obj':\n self.vertices, self._AbstractMesh__polys, self.poly_normals, self.uvcoords, self.coor, 
self.groups = IO.read_obj(filename)\n # self.vertices, self.faces, self.face_normals = IO.read_obj(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n self.poly_normals.attach(self)\n self.uvcoords.attach(self)\n self.coor.attach(self)\n elif ext == 'mtl':\n self.material = IO.read_mtl(filename)\n return\n\n elif ext == 'off':\n self.vertices, self._AbstractMesh__polys = IO.read_off(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n\n elif ext == 'mesh':\n self.vertices, self._AbstractMesh__polys, labels = IO.read_mesh(filename)\n self.vertices.attach(self)\n self._AbstractMesh__polys.attach(self)\n\n else:\n raise Exception(\"Only .obj, .off and .mesh files are supported\")\n\n self.labels = ObservableArray(self.num_polys, dtype=np.int)\n self.labels[:] = np.zeros(self.labels.shape, dtype=np.int) if ext != 'mesh' else labels\n self.labels.attach(self)\n\n self.__load_operations()\n\n return self\n\n def save_file(self, filename):\n\n \"\"\"\n Save the current mesh in a file. Currently it supports the .obj extension. \n\n Parameters:\n\n filename (string): The name of the file\n\n \"\"\"\n\n ext = filename.split('.')[-1]\n\n if ext == 'obj':\n IO.save_obj(self, filename)\n elif ext == 'off':\n IO.save_off(self, filename)\n elif ext == 'mesh':\n IO.save_mesh(self, filename)\n else:\n raise Exception(\"Only .obj, .off and .mesh files are supported\")\n\n def __compute_metrics(self):\n\n self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)\n self.simplex_metrics['aspect_ratio'] = triangle_aspect_ratio(self.vertices, self.polys)\n\n def update_metrics(self):\n self.__compute_metrics()\n\n @property\n def _map_poly_indices(self):\n return self.__map_poly_indices\n\n def boundary(self):\n\n \"\"\"\n Compute the boundary of the current mesh. It only returns the faces that are inside the clipping\n \"\"\"\n if (self._AbstractMesh__boundary_needs_update):\n clipping_range = super(Trimesh, self).boundary()\n self._AbstractMesh__visible_polys = clipping_range \n self._AbstractMesh__boundary_cached = clipping_range\n self._AbstractMesh__boundary_needs_update = False\n\n self.__map_poly_indices = []\n counter = 0\n for c in clipping_range:\n if c:\n self.__map_poly_indices.append(counter)\n else:\n counter = counter + 1\n\n return self.polys[self._AbstractMesh__boundary_cached], self._AbstractMesh__boundary_cached\n\n def as_edges_flat(self):\n # Faces inside the bounding box\n boundaries = self.boundary()[0]\n # Insert into a vertical array all the correspondences between all the vertices collapsed in one dimension\n edges = np.c_[boundaries[:, :2], boundaries[:, 1:], boundaries[:, 2], boundaries[:, 0]].flatten()\n # edges_flat = self.vertices[edges].tolist()\n return edges\n\n def _as_threejs_triangle_soup(self):\n\n tris = self.vertices[self.boundary()[0].flatten()]\n return tris.astype(np.float32), compute_three_normals(tris).astype(np.float32)\n\n def as_triangles(self):\n return self.boundary()[0].flatten().astype(\"uint32\")\n\n def _as_threejs_colors(self, colors=None):\n\n if colors is not None:\n return np.repeat(colors, 3, axis=0)\n return np.repeat(self.boundary()[1], 3)\n \n @property\n def num_triangles(self):\n return self.num_polys\n\n\n \n def vertex_remove(self, vtx_id):\n\n \"\"\"\n Remove a vertex from the current mesh. It affects the mesh geometry. 
\n\n Parameters:\n\n vtx_id (int): The index of the vertex to remove \n\n \"\"\"\n\n self.vertices_remove([vtx_id])\n\n def vertices_remove(self, vtx_ids):\n \"\"\"\n Remove a list of vertices from the current mesh. It affects the mesh geometry. \n\n Parameters:\n\n vtx_ids (Array (Nx1 / 1xN) type=int): List of vertices to remove. Each vertex is in the form [int]\n\n \"\"\"\n self._dont_update = True\n vtx_ids = np.array(vtx_ids)\n\n for v_id in vtx_ids:\n\n self.vertices = np.delete(self.vertices, v_id, 0)\n condition = ((self._AbstractMesh__polys[:, 0] != v_id) &\n (self._AbstractMesh__polys[:, 1] != v_id) &\n (self._AbstractMesh__polys[:, 2] != v_id))\n\n if self.labels is not None:\n self.labels = self.labels[condition]\n\n self._AbstractMesh__polys = self._AbstractMesh__polys[condition]\n\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 0] > v_id)] -= np.array([1, 0, 0])\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 1] > v_id)] -= np.array([0, 1, 0])\n self._AbstractMesh__polys[(self._AbstractMesh__polys[:, 2] > v_id)] -= np.array([0, 0, 1])\n\n vtx_ids[vtx_ids > v_id] -= 1\n\n self.__load_operations()\n\n def poly_add(self, new_poly):\n \"\"\"\n Add a new face to the current mesh. It affects the mesh topology. \n\n Parameters:\n\n new_poly (Array (Nx1) type=int): Poly to add in the form [int, ..., int]\n\n \"\"\"\n self.polys_add(new_poly)\n\n def polys_add(self, new_polys):\n\n \"\"\"\n Add a list of new faces to the current mesh. It affects the mesh topology. \n\n Parameters:\n\n new_polys (Array (NxM) type=int): List of faces to add. Each face is in the form [int, ..., int]\n \"\"\"\n\n AbstractMesh.polys_add(self, new_polys)\n self.__load_operations()\n \n\n\n def poly_remove(self, poly_id):\n\n \"\"\"\n Remove a poly from the current mesh. It affects the mesh topology. \n\n Parameters:\n\n poly_id (int): The index of the face to remove \n\n \"\"\"\n\n self.polys_remove([poly_id])\n\n \n def polys_remove(self, poly_ids):\n\n \"\"\"\n Remove a list of polys from the current mesh. It affects the mesh topology. \n\n Parameters:\n\n poly_ids (Array (Nx1 / 1xN) type=int): List of polys to remove. 
Each face is in the form [int]\n\n \"\"\"\n AbstractMesh.polys_remove(self, poly_ids)\n self.__load_operations()\n \n def tessellate(self):\n return self.polys\n \n @property\n def edge_is_manifold(self):\n val = self.edge_valence\n return np.logical_and(val > 0, val < 3)\n\n \n @property\n def poly_is_on_boundary(self):\n return np.logical_not(np.all(self.adj_poly2poly != -1, axis = 1))\n \n @property\n def edge_is_on_boundary(self):\n boundary_edges = self.adj_poly2edge[self.poly_is_on_boundary].reshape(-1)\n boundary_edges = [e for e in boundary_edges if len(self.adj_edge2poly[e]) == 1]\n bool_vec = np.zeros((self.num_edges), dtype=np.bool)\n bool_vec[boundary_edges] = True\n return bool_vec\n \n @property\n def vert_is_on_boundary(self):\n boundary_verts = self.edges[self.edge_is_on_boundary].reshape(-1)\n bool_vec = np.zeros((self.num_vertices), dtype=np.bool)\n bool_vec[boundary_verts] = True\n return bool_vec\n\n \n @property\n def area(self):\n return np.sum(self.simplex_metrics['area'][1])\n\n def normalize_area(self):\n scale_factor = 1.0/np.sqrt(self.area)\n self.transform_scale([scale_factor, scale_factor, scale_factor])\n self.simplex_metrics['area'] = triangle_area(self.vertices, self.polys)\n \n def sharp_creases(self, threshold=1.0472):\n e2p = self.adj_edge2poly.array\n indices = np.logical_not(np.all(e2p != -1, axis=1)) \n angles = utilities.angle_between_vectors(self.poly_normals[e2p[:,0]], self.poly_normals[e2p[:,1]], True)[0]\n result = angles > threshold\n result[indices] = True\n return result\n \n def fix_poly_order():\n normals = self.poly_normals\n center = self.mesh_centroid\n a = (normals-center)\n norm = np.linalg.norm(a, axis=1)\n norm.shape = (-1,1)\n a /= norm\n condition = np.einsum(\"ij,ij->i\", a, normals) > 0\n self.polys[condition] = np.flip(mesh.polys[condition], axis=1)\n self.__load_operations()\n \n\n #deprecated\n @property\n @deprecated(\"Use the method adj_poly2poly instead\")\n def face2face(self):\n return self._AbstractMesh__adj_poly2poly\n" ]
[ [ "numpy.sqrt", "numpy.logical_and", "numpy.einsum", "numpy.linalg.norm", "numpy.all", "numpy.delete", "numpy.flip", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tianjiashuo/akg
[ "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817", "a9cbf642063fb1086a93e8bc6be6feb145689817" ]
[ "tests/common/test_run/sqrt_run.py", "tests/common/test_run/ascend/logsigmoid_ad_run.py", "tests/common/test_run/ascend/upsampling_run.py", "tests/common/test_run/ascend/reduce_logsumexp_run.py", "tests/common/test_run/ascend/im2col_run.py", "tests/common/test_run/ascend/selu_run.py", "tests/common/test_run/ascend/greater_run.py", "tests/common/test_run/ascend/matmul_addn_transdata_run.py", "tests/common/test_run/gather_nd_run.py" ]
[ "# Copyright 2019-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport akg\nimport numpy as np\nfrom akg.utils import kernel_exec as utils\nfrom akg.ops.math import Sqrt\nfrom tests.common.tensorio import compare_tensor\nfrom tests.common.gen_random import random_gaussian\nfrom akg.utils.result_analysis import target_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\n\ndef sqrt_run(shape, dtype, attrs):\n if 'tuning' in attrs.keys():\n t = attrs.get(\"tuning\", False)\n kernel_name = attrs.get(\"kernel_name\", False)\n mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)\n if t:\n expect, input, output = gen_data(dtype, shape)\n return mod, expect, (input, output)\n else:\n return mod\n else:\n expect, input, output = gen_data(dtype, shape)\n mod = utils.op_build_test(Sqrt, [shape], [dtype], kernel_name='sqrt', attrs=attrs)\n output = utils.mod_launch(mod, (input, output), expect=expect)\n if attrs.get(\"profiling\", False):\n target_name = attrs[\"target\"].split()[0]\n args_list = to_tvm_nd_array([input, output], akg.tvm.context(target_name, 0))\n target_profiling(mod, *args_list, target=target_name, repeat_time=attrs[\"repeat_times\"])\n return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)\n\n\ndef gen_data(dtype, shape):\n # Generate data for testing the op\n input = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)\n input = np.abs(input)\n expect = np.sqrt(input)\n output = np.full(expect.shape, np.nan, dtype)\n return expect, input, output\n", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport numpy as np\r\nfrom akg.utils import kernel_exec as utils\r\nfrom tests.common.test_op.ascend.logsigmoid_ad import logsigmoid_ad\r\nfrom tests.common.tensorio import compare_tensor\r\nfrom tests.common.base import get_rtol_atol\r\nfrom tests.common.gen_random import random_gaussian\r\n\r\n\r\ndef logsigmoid_ad_benchmark(input_np):\r\n exp_input = np.exp(input_np)\r\n exp_input_1 = exp_input + 1\r\n logsigmoid_grad = np.reciprocal(exp_input_1)\r\n return logsigmoid_grad\r\n\r\n\r\ndef logsigmoid_ad_run(shape, dtype, kernel_name, attrs):\r\n if 'tuning' in attrs.keys():\r\n t = attrs.get(\"tuning\", False)\r\n kernel_name = attrs.get(\"kernel_name\", False)\r\n mod = utils.op_build_test(logsigmoid_ad, [shape, shape], [dtype, dtype], kernel_name=kernel_name, attrs=attrs, 
tuning=t)\r\n if t:\r\n expect, head_np, input_np, output = gen_data(dtype, shape)\r\n return mod, expect, (head_np, input_np, output)\r\n else:\r\n return mod\r\n\r\n else:\r\n expect, head_np, input_np, output = gen_data(dtype, shape)\r\n mod = utils.op_build_test(logsigmoid_ad, [shape, shape], [dtype, dtype], kernel_name=\"logsigmoid\", attrs=attrs)\r\n output = utils.mod_launch(mod, [head_np, input_np, output], expect=expect)\r\n rtol, atol = get_rtol_atol(\"logsigmoid\", dtype)\r\n return (head_np, input_np), output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)\r\n\r\n\r\ndef gen_data(dtype, shape):\r\n input_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)\r\n head_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)\r\n logsigmoid_grad = logsigmoid_ad_benchmark(input_np)\r\n expect = logsigmoid_grad * head_np\r\n output = np.full(expect.shape, np.nan, dtype)\r\n return expect, head_np, input_np, output", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport numpy as np\r\nfrom akg.utils import kernel_exec as utils\r\nfrom tests.common.test_op.ascend import upsampling\r\nfrom tests.common.tensorio import compare_tensor\r\nfrom tests.common.gen_random import random_gaussian\r\n'''\r\n@param input_data: original image with data_format \"NHWC\".\r\n@param output_shape: output image shape with data_format \"NHWC\".\r\n notice that BatchNum and ChannelNum of output shape must be equal to input shape.\r\n'''\r\n\r\n\r\ndef upsampling_expect(input_data, output_shape):\r\n scale = [output_shape[i] / input_data.shape[i] for i in range(1, 3)]\r\n tmp = np.repeat(input_data, scale[0], axis=1)\r\n return np.repeat(tmp, scale[1], axis=2)\r\n\r\n\r\ndef upsampling_run(in_shape, out_shape, dtype, kernel_name, attrs):\r\n kernel_name = utils.gen_name_kernel(kernel_name, dtype, in_shape)\r\n\r\n if 'tuning' in attrs.keys():\r\n t = attrs.get(\"tuning\", False)\r\n kernel_name = attrs.get(\"kernel_name\", False)\r\n mod = utils.op_build_test(upsampling.upsampling,\r\n input_shapes=[in_shape], input_types=[dtype],\r\n op_attrs=[out_shape], kernel_name=kernel_name, attrs=attrs, tuning=t)\r\n if t:\r\n expect, input, output = gen_data(dtype, in_shape, out_shape)\r\n return mod, expect, (input, output)\r\n else:\r\n return mod\r\n else:\r\n # Create op\r\n mod = utils.op_build_test(upsampling.upsampling,\r\n input_shapes=[in_shape], input_types=[dtype],\r\n op_attrs=[out_shape], kernel_name=kernel_name, attrs=attrs)\r\n expect, input, output = gen_data(dtype, in_shape, out_shape)\r\n output = utils.mod_launch(mod, (input, output), expect=expect)\r\n\r\n return input, output, expect, compare_tensor(output, expect, atol=5e-01, rtol=5e-03, equal_nan=True)\r\n\r\n\r\ndef gen_data(dtype, in_shape, out_shape):\r\n # Generate data for testing the op\r\n input = random_gaussian(in_shape, miu=1, sigma=4).astype(dtype)\r\n # Generate expected output using numpy implementation of 
resize bilinear\r\n expect = upsampling_expect(input, out_shape)\r\n # Predict output\r\n output = np.full(expect.shape, np.nan, dtype)\r\n return expect, input, output\r\n", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport numpy as np\r\nfrom akg.utils import kernel_exec as utils\r\nfrom tests.common.test_op.ascend import reduce_logsumexp\r\nfrom tests.common.tensorio import compare_tensor\r\nfrom tests.common.base import get_rtol_atol\r\nfrom tests.common.gen_random import random_gaussian\r\nfrom akg.utils.dsl_create import get_reduce_out_shape\r\n\r\ndef reduce_logsumexp_run(shape, dtype, axis=None, keepdims=False, kernel_name=\"reduce_logsumexp\", attrs=None):\r\n op_attrs = [axis, keepdims]\r\n \r\n if 'tuning' in attrs.keys():\r\n t = attrs.get(\"tuning\", False)\r\n kernel_name = attrs.get(\"kernel_name\", False)\r\n mod = utils.op_build_test(reduce_logsumexp.reduce_logsumexp, [shape], [dtype],\r\n op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)\r\n if t:\r\n expect, input, output = gen_data(dtype, shape, axis, keepdims)\r\n return mod, expect, (input, output)\r\n else:\r\n return mod\r\n\r\n else:\r\n expect, input, output = gen_data(dtype, shape, axis, keepdims)\r\n mod = utils.op_build_test(reduce_logsumexp.reduce_logsumexp, [shape], [dtype],\r\n op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)\r\n output = utils.mod_launch(mod, (input, output), expect=expect)\r\n rtol, atol = get_rtol_atol(\"reduce_logsumexp\", dtype)\r\n return input, output, expect, compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=True)\r\n\r\n\r\ndef gen_data(dtype, shape, axis, keepdims):\r\n input_np = random_gaussian(shape, miu=1, sigma=0.5).astype(dtype)\r\n exp_input = np.exp(input_np)\r\n sumexp_input = np.sum(exp_input, axis=axis, keepdims=keepdims)\r\n logsumexp_input = np.log(sumexp_input)\r\n out_shape = get_reduce_out_shape(shape, axis=axis, keepdims=keepdims)\r\n output = np.full(out_shape, np.nan, dtype)\r\n return logsumexp_input, input_np, output", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom tests.common.tensorio import compare_tensor\nimport numpy as np\nfrom akg.utils import kernel_exec as utils\nfrom tests.common.test_op.ascend.im2col import im2col_manual_schedule\nfrom tests.common.base import get_rtol_atol\nfrom tests.common.gen_random import random_gaussian\n\n\ndef im2col_benchmark(data, kernel, 
pad, stride):\n\n N, C1, H, W, C0 = data.shape\n stride_h, stride_w = stride\n kernel_h, kernel_w = kernel\n pad_t, pad_b, pad_l, pad_r = pad\n block_size = 16\n\n Ho = (H + pad_b + pad_t - kernel_h) // stride_h + 1\n Wo = (W + pad_r + pad_l - kernel_w) // stride_w + 1\n\n data_pad_shape = (N, C1, H + pad_t + pad_b, W + pad_l + pad_r, C0)\n data_pad = np.full(data_pad_shape, 0, dtype=data.dtype)\n data_pad[:, :, pad_t: pad_t + H, pad_l: pad_l + W, :] = data\n\n expect_shape = (N,\n (Ho * Wo + block_size - 1) // block_size,\n C1 * kernel_h * kernel_w,\n block_size,\n C0)\n expect = np.zeros(expect_shape, dtype=data.dtype)\n\n for n in range(N):\n for ho in range(Ho):\n for wo in range(Wo):\n for c1 in range(C1):\n for kh in range(kernel_h):\n for kw in range(kernel_w):\n expect[n, (ho*Wo+wo) // block_size, c1*kernel_h*kernel_w+kh*kernel_w+kw, (ho*Wo + wo) %\n block_size, :] = data_pad[n, c1, ho*stride_h + kh, wo*stride_w + kw, :]\n return expect\n\n\ndef im2col_run(shape, kernel, stride, pad, dtype, polyhedral=False, attrs=None):\n if polyhedral:\n raise Exception(\n \"ERROR: no DSL with poly support for im2col, please select manual schedule version\")\n else:\n mod = utils.op_build_test(im2col_manual_schedule, [shape],\n [dtype], kernel_name=\"im2col_manual_schedule\",\n op_attrs=[kernel, stride, pad], attrs=attrs, polyhedral=polyhedral)\n expect, data, res = gen_data(dtype, kernel, pad, shape, stride)\n output = utils.mod_launch(mod, [data, res], expect=expect)\n atol, rtol = get_rtol_atol(\"im2col\", dtype)\n return data, output, expect, compare_tensor(output, expect, atol=atol, rtol=rtol, equal_nan=True)\n\n\ndef gen_data(dtype, kernel, pad, shape, stride):\n data = random_gaussian(shape, miu=1, sigma=0.1).astype(dtype)\n expect = im2col_benchmark(data, kernel, pad, stride).astype(dtype)\n res = np.full(expect.shape, np.nan, dtype)\n return expect, data, res\n", "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"selu_run\"\"\"\nimport numpy as np\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom tests.common.test_op.ascend import selu\nfrom tests.common.base import get_rtol_atol\n\n# define selu oprator's required constants\nALPHA = 1.67326324235\nSCALE = 1.05070098736\n# define product of scale and alpha\nSCALE_ALPHA_PRODUCT = 1.75809934085\n# define a scalar, value = -1, the calculation of exp need minus one\nSCALAR_NEGATIVE_ONE = -1\n\ndef selu_run(shape, dtype, attrs):\n \"\"\"selu_run implementation\"\"\"\n mod = utils.op_build_test(selu.selu, [shape], [dtype], kernel_name='selu', op_attrs=[], attrs=attrs)\n args, exp_output, input_data = gen_data(dtype, shape)\n acu_output = utils.mod_launch(mod, args, expect=exp_output)\n # compare result\n rtol, atol = get_rtol_atol(\"selu\", dtype)\n testcase_result = compare_tensor(acu_output, exp_output, rtol=rtol, atol=atol, equal_nan=True)\n\n return input_data, acu_output, exp_output, testcase_result\n\n\ndef gen_data(dtype, 
shape):\n # result_numpy\n if dtype == 'int8':\n low_bound = -128\n high_bound = 127\n elif dtype == 'int32':\n low_bound = -1000\n high_bound = 1000\n else:\n low_bound = -1.0\n high_bound = 1.0\n\n input_data = np.random.uniform(low=low_bound, high=high_bound, size=tuple(shape)).astype(dtype)\n if dtype in (\"float16\", \"float32\"):\n input_data = input_data.astype(\"float32\")\n else:\n input_data = input_data.astype(\"float16\")\n tensor_zero = np.multiply(input_data, 0)\n # generate negative_res and positive_res to compute\n # When the element value is greater than 0 and less than 0\n negative_res = np.minimum(input_data, tensor_zero)\n positive_res = np.maximum(input_data, tensor_zero)\n exp_res = np.exp(negative_res)\n sub_res = np.add(exp_res, SCALAR_NEGATIVE_ONE)\n negative_muls_res = np.multiply(sub_res, SCALE_ALPHA_PRODUCT)\n if dtype == \"int8\":\n negative_muls_res = np.ceil(negative_muls_res)\n\n positive_muls_res = np.multiply(positive_res, SCALE)\n exp_output = np.add(negative_muls_res, positive_muls_res)\n\n # cast to ori_dtype\n if dtype == \"float16\" or dtype == \"int8\" or dtype == \"int32\":\n exp_output = exp_output.astype(dtype)\n\n input_data = input_data.astype(dtype)\n # inputs and output to hold the data\n output = np.full(shape, np.nan, dtype)\n args = [input_data, output]\n return args, exp_output, input_data\n", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom akg.utils import kernel_exec as utils\nfrom tests.common.test_op.ascend import greater\nfrom tests.common.gen_random import random_gaussian\n\n\ndef greater_run(shapes, dtype, kernel_name, attrs_op={}, cce_path=\"./\", attrs={}):\n attrs.update(attrs_op)\n if 'tuning' in attrs.keys():\n t = attrs.get(\"tuning\", False)\n kernel_name = attrs.get(\"kernel_name\", False)\n mod = utils.op_build_test(greater.greater, shapes, [dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)\n if t:\n benchMark, inputs, output = gen_data(shapes)\n return mod, benchMark, inputs + [output]\n else:\n return mod\n else:\n mod = utils.op_build_test(greater.greater, shapes, [dtype, dtype], kernel_name=kernel_name, attrs=attrs)\n benchMark, inputs, output = gen_data(shapes)\n output = utils.mod_launch(mod, inputs + [output], expect=benchMark)\n\n return inputs, output, benchMark, np.array_equal(output, benchMark)\n\n\ndef gen_data(shapes):\n inputs = []\n for i in range(len(shapes)):\n shape = shapes[i]\n input = random_gaussian(shape, miu=1, sigma=0.1).astype(np.float16)\n inputs.append(input)\n \"\"\"\n inputs.append(np.ones(shapes[0], dtype=np.float16))\n for i in range(16):\n inputs[0][i]=0\n inputs.append(np.zeros(shapes[0], dtype=np.float16))\n \"\"\"\n if len(inputs) != 2:\n raise RuntimeError(\"inputs num should be 2\")\n benchMark = np.greater(inputs[0], inputs[1])\n output = np.full(benchMark.shape, 0, bool)\n return benchMark, inputs, output\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache 
License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport akg.tvm\nimport numpy as np\nfrom akg.utils import kernel_exec as utils\nfrom akg.ops.math.ascend import MatMul\nfrom tests.common.test_run.ascend.matmul_run import *\nfrom akg.ops.math import Addn\nfrom akg.ops.math import Add\n\ndef matmul_addn_transdata(x, y, adds, b, out_dtype, left_format=\"zZ\", right_format=\"nZ\", out_format=\"zN\", transpose_x=False,\n transpose_y=False, attrs={}, target='cce'):\n matmul_res, attrs_mat = MatMul(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs=attrs)\n addn_res = Addn(adds, target=target)\n res = Add(matmul_res, addn_res, target=target)\n if out_format == 'zN':\n n1, m1, m0, n0 = matmul_res.shape[-4:]\n new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]\n elif out_format == 'zZ':\n m1, n1, m0, n0 = matmul_res.shape[-4:]\n new_shape = matmul_res.shape[:-4] + [m1 * m0, n1 * n0]\n\n func = akg.tvm.get_global_func(\"TransData\")\n res = func([res], {\"src_format\" : \"FRACTAL_NZ\", \"dst_format\" : \"DefaultFormat\", \"output_shape\": new_shape})\n return res, attrs_mat\n\ndef matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, output_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs, tuning=False):\n batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)\n m = (m + 15) // 16 * 16\n n = (n + 15) // 16 * 16\n k = (k + 15) // 16 * 16\n shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias,\n left_format, right_format, output_format)\n addn_shapes = []\n for i in range(add_n):\n addn_shapes.append(out_shape)\n \n input_shapes = [shape_xx, shape_yy, addn_shapes, bias_shape]\n input_types = [dtype, dtype, out_dtype, bias_dtype]\n \n has_bias = False\n if bias == 1:\n has_bias = True\n op_attrs = [out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]\n if has_bias == False:\n input_shapes = [shape_xx, shape_yy, addn_shapes]\n input_types = [dtype, dtype, out_dtype]\n op_attrs = [None, out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]\n return utils.op_build_test(matmul_addn_transdata, input_shapes, input_types, op_attrs, kernel_name, attrs=attrs, tuning=tuning)\n\ndef matmul_addn_transdata_execute(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}):\n batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)\n m = (m + 15) // 16 * 16\n n = (n + 15) // 16 * 16\n k = (k + 15) // 16 * 16\n shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)\n mod = matmul_addn_transdata_compile(shape_x, shape_y, bias, add_n, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs=attrs)\n # Generate data\n m_x, m_y, bench_mark, bias_data = matmul_data(batch_tuple, m, k, n, dtype, bias_dtype, out_dtype, bias, 
adj_x, adj_y, left_format, right_format, out_format)\n \n inputs = []\n mod_data = [m_x, m_y]\n for i in range(add_n):\n input = random_gaussian(out_shape, miu=1, sigma=0.1).astype(out_dtype)\n inputs.append(input)\n mod_data.append(input)\n bench_mark = np.add(np.sum(inputs, axis=0), bench_mark)\n\n transpose_axis = []\n new_shape = []\n out_shape = list(out_shape)\n if out_format == 'zN':\n n1, m1, m0, n0 = out_shape[-4:]\n new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]\n transpose_axis = [0, 1+1, 2+1, 0+1, 3+1]\n elif out_format == 'zZ':\n m1, n1, m0, n0 = out_shape[-4:]\n new_shape = out_shape[:-4] + [m1 * m0, n1 * n0]\n transpose_axis = [0, 0+1, 2+1, 1+1, 3+1]\n bench_mark = bench_mark.transpose(transpose_axis)\n bench_mark = np.reshape(bench_mark,new_shape)\n\n # mod launch\n output = np.full(bench_mark.shape, np.nan, out_dtype)\n if bias == 0:\n mod_data.append(output) \n output = utils.mod_launch(mod, mod_data, expect=bench_mark)\n elif bias == 1:\n mod_data.append(bias_data)\n mod_data.append(output)\n output = utils.mod_launch(mod, mod_data, expect=bench_mark)\n # compare result\n rtol, atol = get_rtol_atol(\"matmul\", dtype)\n compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)\n return (m_x, m_y), output, bench_mark, compare_result\n", "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nfrom copy import deepcopy\nfrom tests.common.base import get_rtol_atol\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom akg.ops.array.gpu import GatherNd\nfrom akg.utils.result_analysis import target_profiling\nfrom akg.utils.format_transform import to_tvm_nd_array\nfrom tests.common.gen_random import random_gaussian, gen_indices_gather_nd\nfrom tests.common.test_utils import gather_nd_np\nimport numpy as np\nimport akg\n\n\ndef gen_data(shape1, dtype1, shape2, dtype2):\n params = random_gaussian(shape1).astype(dtype1)\n out_dim1 = 1\n for i in range(len(shape2) - 1):\n out_dim1 = out_dim1 * shape2[i]\n\n indices = gen_indices_gather_nd(shape1, shape2, dtype2)\n expect = gather_nd_np(params, indices)\n\n return params, indices, expect\n\ndef gather_nd_run(shape1, dtype1, shape2, dtype2, poly_sch=True, attrs=None):\n if not attrs:\n attrs = {\"target\": \"cuda\"}\n mod = utils.op_build_test(GatherNd, [shape1, shape2], [dtype1, dtype2],\n polyhedral=poly_sch, attrs=attrs, kernel_name=\"gather_nd\")\n\n # gen data\n params, indices, expect = gen_data(shape1, dtype1, shape2, dtype2)\n output_shape = expect.shape\n\n if len(expect.shape) == 0:\n output_shape = (1, )\n output = np.zeros(output_shape, expect.dtype)\n output = utils.mod_launch(mod, (params, indices, output), expect = expect)\n\n atol, rtol = get_rtol_atol(\"gather_nd\", dtype1)\n res = compare_tensor(output, expect, rtol=rtol, atol=atol)\n print(\"Test {}\".format(\"Pass\" if res else \"Failed\"))\n target_name = attrs[\"target\"].split()[0]\n if not res:\n mod_source = mod\n if target_name != 
\"llvm\":\n mod_source = mod.imported_modules[0]\n print(\"Error {}:========================\".format(target_name))\n print(mod_source.get_source())\n raise AssertionError(\"Test fail\")\n\n if attrs[\"profiling\"]:\n params, indices, output = to_tvm_nd_array(\n [params, indices, output], akg.tvm.context(target_name, 0))\n target_profiling(mod, params, indices, output, target=target_name, repeat_time=attrs[\"repeat_times\"])\n return (params, indices), output, expect, res" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.full" ], [ "numpy.reciprocal", "numpy.exp", "numpy.full" ], [ "numpy.repeat", "numpy.full" ], [ "numpy.log", "numpy.exp", "numpy.sum", "numpy.full" ], [ "numpy.zeros", "numpy.full" ], [ "numpy.maximum", "numpy.minimum", "numpy.multiply", "numpy.full", "numpy.ceil", "numpy.add", "numpy.exp" ], [ "numpy.greater", "numpy.array_equal", "numpy.full" ], [ "numpy.reshape", "numpy.sum", "numpy.full" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zcdzcdzcd/models
[ "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d", "a31b526a7617a152a138a865b5689bf5b59f655d" ]
[ "official/nlp/optimization.py", "research/attention_ocr/python/datasets/unittest_utils.py", "official/transformer/utils/tokenizer_test.py", "official/transformer/v2/transformer_main.py", "samples/cookbook/regression/custom_regression.py", "official/r1/transformer/embedding_layer.py", "research/gan/image_compression/networks.py", "official/vision/image_classification/resnet_imagenet_main.py", "research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py", "research/slim/nets/i3d.py", "research/lstm_object_detection/lstm/lstm_cells_test.py", "research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nimport tensorflow as tf\n\n\nclass WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"Applys a warmup schedule on a given learning rate decay schedule.\"\"\"\n\n def __init__(\n self,\n initial_learning_rate,\n decay_schedule_fn,\n warmup_steps,\n power=1.0,\n name=None):\n super(WarmUp, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.warmup_steps = warmup_steps\n self.power = power\n self.decay_schedule_fn = decay_schedule_fn\n self.name = name\n\n def __call__(self, step):\n with tf.name_scope(self.name or 'WarmUp') as name:\n # Implements polynomial warmup. i.e., if global_step < warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n global_step_float = tf.cast(step, tf.float32)\n warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)\n warmup_percent_done = global_step_float / warmup_steps_float\n warmup_learning_rate = (\n self.initial_learning_rate *\n tf.math.pow(warmup_percent_done, self.power))\n return tf.cond(global_step_float < warmup_steps_float,\n lambda: warmup_learning_rate,\n lambda: self.decay_schedule_fn(step),\n name=name)\n\n def get_config(self):\n return {\n 'initial_learning_rate': self.initial_learning_rate,\n 'decay_schedule_fn': self.decay_schedule_fn,\n 'warmup_steps': self.warmup_steps,\n 'power': self.power,\n 'name': self.name\n }\n\n\ndef create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n \"\"\"Creates an optimizer with learning rate schedule.\"\"\"\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=['layer_norm', 'bias'])\n return optimizer\n\n\nclass AdamWeightDecay(tf.keras.optimizers.Adam):\n \"\"\"Adam enables L2 weight decay and clip_by_global_norm on gradients.\n\n Just adding the square of the weights to the loss function is *not* the\n correct way of using L2 regularization/weight decay with Adam, since that will\n interact with the m and v parameters in strange ways.\n\n Instead we want ot decay the weights in a manner that doesn't interact with\n the m/v parameters. 
This is equivalent to adding the square of the weights to\n the loss with plain (non-momentum) SGD.\n \"\"\"\n\n def __init__(self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n amsgrad=False,\n weight_decay_rate=0.0,\n include_in_weight_decay=None,\n exclude_from_weight_decay=None,\n name='AdamWeightDecay',\n **kwargs):\n super(AdamWeightDecay, self).__init__(\n learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)\n self.weight_decay_rate = weight_decay_rate\n self._include_in_weight_decay = include_in_weight_decay\n self._exclude_from_weight_decay = exclude_from_weight_decay\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates an optimizer from its config with WarmUp custom object.\"\"\"\n custom_objects = {'WarmUp': WarmUp}\n return super(AdamWeightDecay, cls).from_config(\n config, custom_objects=custom_objects)\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype,\n apply_state)\n apply_state['weight_decay_rate'] = tf.constant(\n self.weight_decay_rate, name='adam_weight_decay_rate')\n\n def _decay_weights_op(self, var, learning_rate, apply_state):\n do_decay = self._do_use_weight_decay(var.name)\n if do_decay:\n return var.assign_sub(\n learning_rate * var *\n apply_state['weight_decay_rate'],\n use_locking=self._use_locking)\n return tf.no_op()\n\n def apply_gradients(self, grads_and_vars, name=None):\n grads, tvars = list(zip(*grads_and_vars))\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars))\n\n def _get_lr(self, var_device, var_dtype, apply_state):\n \"\"\"Retrieves the learning rate with the given state.\"\"\"\n if apply_state is None:\n return self._decayed_lr_t[var_dtype], {}\n\n apply_state = apply_state or {}\n coefficients = apply_state.get((var_device, var_dtype))\n if coefficients is None:\n coefficients = self._fallback_apply_state(var_device, var_dtype)\n apply_state[(var_device, var_dtype)] = coefficients\n\n return coefficients['lr_t'], dict(apply_state=apply_state)\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_dense(\n grad, var, **kwargs)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super(AdamWeightDecay, self)._resource_apply_sparse(\n grad, var, indices, **kwargs)\n\n def get_config(self):\n config = super(AdamWeightDecay, self).get_config()\n config.update({\n 'weight_decay_rate': self.weight_decay_rate,\n })\n return config\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if self.weight_decay_rate == 0:\n return False\n\n if self._include_in_weight_decay:\n for r in self._include_in_weight_decay:\n if re.search(r, param_name) is not None:\n return True\n\n if self._exclude_from_weight_decay:\n for r in self._exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions to make unit testing easier.\"\"\"\n\nimport StringIO\nimport numpy as np\nfrom PIL import Image as PILImage\nimport tensorflow as tf\n\n\ndef create_random_image(image_format, shape):\n \"\"\"Creates an image with random values.\n\n Args:\n image_format: An image format (PNG or JPEG).\n shape: A tuple with image shape (including channels).\n\n Returns:\n A tuple (<numpy ndarray>, <a string with encoded image>)\n \"\"\"\n image = np.random.randint(low=0, high=255, size=shape, dtype='uint8')\n io = StringIO.StringIO()\n image_pil = PILImage.fromarray(image)\n image_pil.save(io, image_format, subsampling=0, quality=100)\n return image, io.getvalue()\n\n\ndef create_serialized_example(name_to_values):\n \"\"\"Creates a tf.Example proto using a dictionary.\n\n It automatically detects type of values and define a corresponding feature.\n\n Args:\n name_to_values: A dictionary.\n\n Returns:\n tf.Example proto.\n \"\"\"\n example = tf.train.Example()\n for name, values in name_to_values.items():\n feature = example.features.feature[name]\n if isinstance(values[0], str):\n add = feature.bytes_list.value.extend\n elif isinstance(values[0], float):\n add = feature.float32_list.value.extend\n elif isinstance(values[0], int):\n add = feature.int64_list.value.extend\n else:\n raise AssertionError('Unsupported type: %s' % type(values[0]))\n add(values)\n return example.SerializeToString()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test Subtokenizer and string helper methods.\"\"\"\n\nimport collections\nimport tempfile\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom official.transformer.utils import tokenizer\n\n\nclass SubtokenizerTest(tf.test.TestCase):\n\n def _init_subtokenizer(self, vocab_list):\n temp_file = tempfile.NamedTemporaryFile(delete=False)\n with tf.io.gfile.GFile(temp_file.name, \"w\") as w:\n for subtoken in vocab_list:\n w.write(\"'%s'\" % subtoken)\n w.write(\"\\n\")\n return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])\n\n def test_encode(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n s = \"testing 123\"\n encoded_list = subtokenizer.encode(s)\n self.assertEqual([1, 2, 0], encoded_list)\n\n def test_decode(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n encoded_list = [1, 2, 0] # testing 123\n decoded_str = subtokenizer.decode(encoded_list)\n self.assertEqual(\"testing 123\", decoded_str)\n\n def test_subtoken_ids_to_tokens(self):\n vocab_list = [\"123_\", \"test\", \"ing_\"]\n subtokenizer = self._init_subtokenizer(vocab_list)\n encoded_list = [1, 2, 0] # testing 123\n token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)\n self.assertEqual([u\"testing\", u\"123\"], token_list)\n\n\nclass StringHelperTest(tf.test.TestCase):\n\n def test_split_string_to_tokens(self):\n text = \"test? testing 123.\"\n\n tokens = tokenizer._split_string_to_tokens(text)\n self.assertEqual([\"test\", \"? \", \"testing\", \"123\", \".\"], tokens)\n\n def test_join_tokens_to_string(self):\n tokens = [\"test\", \"? \", \"testing\", \"123\", \".\"]\n\n s = tokenizer._join_tokens_to_string(tokens)\n self.assertEqual(\"test? 
testing 123.\", s)\n\n def test_escape_token(self):\n token = u\"abc_\\\\4\"\n alphabet = set(\"abc_\\\\u;\")\n\n escaped_token = tokenizer._escape_token(token, alphabet)\n self.assertEqual(\"abc\\\\u\\\\\\\\\\\\52;_\", escaped_token)\n\n def test_unescape_token(self):\n escaped_token = u\"Underline: \\\\u, Backslash: \\\\\\\\, Unicode: \\\\52;\"\n\n unescaped_token = tokenizer._unescape_token(escaped_token)\n self.assertEqual(\n \"Underline: _, Backslash: \\\\, Unicode: 4\", unescaped_token)\n\n def test_list_to_index_dict(self):\n lst = [\"test\", \"strings\"]\n\n d = tokenizer._list_to_index_dict(lst)\n self.assertDictEqual({\"test\": 0, \"strings\": 1}, d)\n\n def test_split_token_to_subtokens(self):\n token = \"abc\"\n subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"ab\": 3}\n max_subtoken_length = 2\n\n subtokens = tokenizer._split_token_to_subtokens(\n token, subtoken_dict, max_subtoken_length)\n self.assertEqual([\"ab\", \"c\"], subtokens)\n\n def test_generate_alphabet_dict(self):\n s = [\"testing\", \"123\"]\n reserved_tokens = [\"???\"]\n\n alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)\n self.assertIn(\"?\", alphabet)\n self.assertIn(\"t\", alphabet)\n self.assertIn(\"e\", alphabet)\n self.assertIn(\"s\", alphabet)\n self.assertIn(\"i\", alphabet)\n self.assertIn(\"n\", alphabet)\n self.assertIn(\"g\", alphabet)\n self.assertIn(\"1\", alphabet)\n self.assertIn(\"2\", alphabet)\n self.assertIn(\"3\", alphabet)\n\n def test_count_and_gen_subtokens(self):\n token_counts = {\"abc\": 5}\n alphabet = set(\"abc_\")\n subtoken_dict = {\"a\": 0, \"b\": 1, \"c\": 2, \"_\": 3}\n max_subtoken_length = 2\n\n subtoken_counts = tokenizer._count_and_gen_subtokens(\n token_counts, alphabet, subtoken_dict, max_subtoken_length)\n\n self.assertIsInstance(subtoken_counts, collections.defaultdict)\n self.assertDictEqual(\n {\"a\": 5, \"b\": 5, \"c\": 5, \"_\": 5, \"ab\": 5, \"bc\": 5, \"c_\": 5,\n \"abc\": 5, \"bc_\": 5, \"abc_\": 5}, subtoken_counts)\n\n def test_filter_and_bucket_subtokens(self):\n subtoken_counts = collections.defaultdict(\n int, {\"a\": 2, \"b\": 4, \"c\": 1, \"ab\": 6, \"ac\": 3, \"abbc\": 5})\n min_count = 3\n\n subtoken_buckets = tokenizer._filter_and_bucket_subtokens(\n subtoken_counts, min_count)\n\n self.assertEqual(len(subtoken_buckets[0]), 0)\n self.assertEqual(set(\"b\"), subtoken_buckets[1])\n self.assertEqual(set([\"ab\", \"ac\"]), subtoken_buckets[2])\n self.assertEqual(len(subtoken_buckets[3]), 0)\n self.assertEqual(set([\"abbc\"]), subtoken_buckets[4])\n\n def test_gen_new_subtoken_list(self):\n subtoken_counts = collections.defaultdict(\n int, {\"translate\": 10, \"t\": 40, \"tr\": 16, \"tra\": 12})\n min_count = 5\n alphabet = set(\"translate\")\n reserved_tokens = [\"reserved\", \"tokens\"]\n\n subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(\n subtoken_counts, min_count, alphabet, reserved_tokens)\n\n # Check that \"tra\" isn\"t in the list (its count should be decremented to 2,\n # so it should not be added to the canddiate list).\n self.assertNotIn(\"tra\", subtoken_list)\n\n self.assertIn(\"tr\", subtoken_list)\n self.assertIn(\"t\", subtoken_list)\n\n self.assertEqual(len(\"translate\"), max_token_length)\n\n def test_generate_subtokens(self):\n token_counts = {\"ab\": 1, \"bc\": 3, \"abc\": 5}\n alphabet = set(\"abc_\")\n min_count = 100\n num_iterations = 1\n reserved_tokens = [\"reserved\", \"tokens\"]\n\n vocab_list = tokenizer._generate_subtokens(\n token_counts, alphabet, min_count, num_iterations, 
reserved_tokens)\n\n # Check that reserved tokens are at the front of the list\n self.assertEqual(vocab_list[:2], reserved_tokens)\n\n # Check that each character in alphabet is in the vocab list\n for c in alphabet:\n self.assertIn(c, vocab_list)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train and evaluate the Transformer model.\n\nSee README for description of setting the training schedule and evaluating the\nBLEU score.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\n# pylint: disable=g-bad-import-order\nfrom official.transformer import compute_bleu\nfrom official.transformer.utils import tokenizer\nfrom official.transformer.v2 import data_pipeline\nfrom official.transformer.v2 import metrics\nfrom official.transformer.v2 import misc\nfrom official.transformer.v2 import optimizer\nfrom official.transformer.v2 import transformer\nfrom official.transformer.v2 import translate\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.utils.misc import keras_utils\nfrom official.utils.misc import distribution_utils\n\nINF = int(1e9)\nBLEU_DIR = \"bleu\"\n_SINGLE_SAMPLE = 1\n\n\ndef translate_and_compute_bleu(model,\n params,\n subtokenizer,\n bleu_source,\n bleu_ref,\n distribution_strategy=None):\n \"\"\"Translate file and report the cased and uncased bleu scores.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n subtokenizer: A subtokenizer object, used for encoding and decoding source\n and translated lines.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file containing the reference for the translated sentences.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n # Create temporary file to store translation.\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tmp_filename = tmp.name\n\n translate.translate_file(\n model,\n params,\n subtokenizer,\n bleu_source,\n output_file=tmp_filename,\n print_all_translations=False,\n distribution_strategy=distribution_strategy)\n\n # Compute uncased and cased bleu scores.\n uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)\n cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)\n os.remove(tmp_filename)\n return uncased_score, cased_score\n\n\ndef evaluate_and_log_bleu(model,\n params,\n bleu_source,\n bleu_ref,\n vocab_file,\n 
distribution_strategy=None):\n \"\"\"Calculate and record the BLEU score.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file containing the reference for the translated sentences.\n vocab_file: A file containing the vocabulary for translation.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n subtokenizer = tokenizer.Subtokenizer(vocab_file)\n\n uncased_score, cased_score = translate_and_compute_bleu(\n model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)\n\n logging.info(\"Bleu score (uncased): %s\", uncased_score)\n logging.info(\"Bleu score (cased): %s\", cased_score)\n return uncased_score, cased_score\n\n\nclass TransformerTask(object):\n \"\"\"Main entry of Transformer model.\"\"\"\n\n def __init__(self, flags_obj):\n \"\"\"Init function of TransformerMain.\n\n Args:\n flags_obj: Object containing parsed flag values, i.e., FLAGS.\n\n Raises:\n ValueError: if not using static batch for input data on TPU.\n \"\"\"\n self.flags_obj = flags_obj\n self.predict_model = None\n\n # Add flag-defined parameters to params object\n num_gpus = flags_core.get_num_gpus(flags_obj)\n self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)\n\n params[\"num_gpus\"] = num_gpus\n params[\"use_ctl\"] = flags_obj.use_ctl\n params[\"data_dir\"] = flags_obj.data_dir\n params[\"model_dir\"] = flags_obj.model_dir\n params[\"static_batch\"] = flags_obj.static_batch\n params[\"max_length\"] = flags_obj.max_length\n params[\"decode_batch_size\"] = flags_obj.decode_batch_size\n params[\"decode_max_length\"] = flags_obj.decode_max_length\n params[\"padded_decode\"] = flags_obj.padded_decode\n params[\"num_parallel_calls\"] = (\n flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)\n\n params[\"use_synthetic_data\"] = flags_obj.use_synthetic_data\n params[\"batch_size\"] = flags_obj.batch_size or params[\"default_batch_size\"]\n params[\"repeat_dataset\"] = None\n params[\"dtype\"] = flags_core.get_tf_dtype(flags_obj)\n params[\"enable_tensorboard\"] = flags_obj.enable_tensorboard\n params[\"enable_metrics_in_training\"] = flags_obj.enable_metrics_in_training\n params[\"steps_between_evals\"] = flags_obj.steps_between_evals\n\n self.distribution_strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=num_gpus,\n all_reduce_alg=flags_obj.all_reduce_alg,\n num_packs=flags_obj.num_packs,\n tpu_address=flags_obj.tpu or \"\")\n if self.use_tpu:\n params[\"num_replicas\"] = self.distribution_strategy.num_replicas_in_sync\n if not params[\"static_batch\"]:\n raise ValueError(\"TPU requires static batch for input data.\")\n else:\n logging.info(\"Running transformer with num_gpus = %d\", num_gpus)\n\n if self.distribution_strategy:\n logging.info(\"For training, using distribution strategy: %s\",\n self.distribution_strategy)\n else:\n logging.info(\"Not using any distribution strategy.\")\n\n if params[\"dtype\"] == tf.float16:\n # TODO(reedwm): It's pretty ugly to set the global policy in a constructor\n # like this. 
What if multiple instances of TransformerTask are created?\n # We should have a better way in the tf.keras.mixed_precision API of doing\n # this.\n loss_scale = flags_core.get_loss_scale(\n flags_obj, default_for_fp16=\"dynamic\")\n policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(\n \"mixed_float16\", loss_scale=loss_scale)\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)\n\n elif params[\"dtype\"] == tf.bfloat16:\n policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(\n \"mixed_bfloat16\")\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)\n\n @property\n def use_tpu(self):\n if self.distribution_strategy:\n return isinstance(self.distribution_strategy,\n tf.distribute.experimental.TPUStrategy)\n return False\n\n def train(self):\n \"\"\"Trains the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n # Sets config options.\n keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)\n\n _ensure_dir(flags_obj.model_dir)\n with distribution_utils.get_strategy_scope(self.distribution_strategy):\n model = transformer.create_model(params, is_train=True)\n opt = self._create_optimizer()\n\n current_step = 0\n checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)\n latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info(\"Loaded checkpoint %s\", latest_checkpoint)\n current_step = opt.iterations.numpy()\n\n if params[\"use_ctl\"]:\n train_loss_metric = tf.keras.metrics.Mean(\n \"training_loss\", dtype=tf.float32)\n if params[\"enable_tensorboard\"]:\n summary_writer = tf.compat.v2.summary.create_file_writer(\n flags_obj.model_dir)\n else:\n summary_writer = tf.compat.v2.summary.create_noop_writer()\n train_metrics = [train_loss_metric]\n if params[\"enable_metrics_in_training\"]:\n train_metrics = train_metrics + model.metrics\n else:\n model.compile(opt)\n\n model.summary()\n\n if self.use_tpu:\n # Different from experimental_distribute_dataset,\n # experimental_distribute_datasets_from_function requires\n # per-replica/local batch size.\n params[\"batch_size\"] /= self.distribution_strategy.num_replicas_in_sync\n train_ds = (\n self.distribution_strategy\n .experimental_distribute_datasets_from_function(\n lambda ctx: data_pipeline.train_input_fn(params, ctx)))\n else:\n train_ds = data_pipeline.train_input_fn(params)\n map_data_fn = data_pipeline.map_data_for_transformer_fn\n train_ds = train_ds.map(\n map_data_fn, num_parallel_calls=params[\"num_parallel_calls\"])\n if params[\"use_ctl\"]:\n train_ds_iterator = iter(train_ds)\n\n callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)\n\n # TODO(b/139418525): Refactor the custom training loop logic.\n @tf.function\n def train_steps(iterator, steps):\n \"\"\"Training steps function for TPU runs.\n\n Args:\n iterator: The input iterator of the training dataset.\n steps: An integer, the number of training steps.\n\n Returns:\n A float, the loss value.\n \"\"\"\n\n def _step_fn(inputs):\n \"\"\"Per-replica step function.\"\"\"\n inputs, targets = inputs\n with tf.GradientTape() as tape:\n logits = model([inputs, targets], training=True)\n loss = metrics.transformer_loss(logits, targets,\n params[\"label_smoothing\"],\n params[\"vocab_size\"])\n # Scales the loss, which results in using the average loss across all\n # of the replicas for backprop.\n scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync\n\n # De-dupes variables due to keras 
tracking issues.\n tvars = list({id(v): v for v in model.trainable_variables}.values())\n grads = tape.gradient(scaled_loss, tvars)\n opt.apply_gradients(zip(grads, tvars))\n # For reporting, the metric takes the mean of losses.\n train_loss_metric.update_state(loss)\n\n for _ in tf.range(steps):\n train_loss_metric.reset_states()\n self.distribution_strategy.experimental_run_v2(\n _step_fn, args=(next(iterator),))\n\n cased_score, uncased_score = None, None\n cased_score_history, uncased_score_history = [], []\n while current_step < flags_obj.train_steps:\n remaining_steps = flags_obj.train_steps - current_step\n train_steps_per_eval = (\n remaining_steps if remaining_steps < flags_obj.steps_between_evals\n else flags_obj.steps_between_evals)\n current_iteration = current_step // flags_obj.steps_between_evals\n\n logging.info(\n \"Start train iteration at global step:{}\".format(current_step))\n history = None\n if params[\"use_ctl\"]:\n if not self.use_tpu:\n raise NotImplementedError(\n \"Custom training loop on GPUs is not implemented.\")\n # Runs training steps.\n with summary_writer.as_default():\n train_steps(\n train_ds_iterator,\n tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32))\n current_step += train_steps_per_eval\n train_loss = train_loss_metric.result().numpy().astype(float)\n logging.info(\"Train Step: %d/%d / loss = %s\", current_step,\n flags_obj.train_steps, train_loss)\n\n if params[\"enable_tensorboard\"]:\n for metric_obj in train_metrics:\n tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(),\n current_step)\n\n checkpoint_name = checkpoint.save(\n os.path.join(flags_obj.model_dir,\n \"ctl_step_{}.ckpt\".format(current_step)))\n logging.info(\"Saved checkpoint to %s\", checkpoint_name)\n else:\n if self.use_tpu:\n raise NotImplementedError(\n \"Keras model.fit on TPUs is not implemented.\")\n history = model.fit(\n train_ds,\n initial_epoch=current_iteration,\n epochs=current_iteration + 1,\n steps_per_epoch=train_steps_per_eval,\n callbacks=callbacks,\n # If TimeHistory is enabled, progress bar would be messy. 
Increase\n # the verbose level to get rid of it.\n verbose=(2 if flags_obj.enable_time_history else 1))\n current_step += train_steps_per_eval\n logging.info(\"Train history: {}\".format(history.history))\n\n logging.info(\"End train iteration at global step:{}\".format(current_step))\n\n if (flags_obj.bleu_source and flags_obj.bleu_ref):\n uncased_score, cased_score = self.eval()\n cased_score_history.append([current_iteration + 1, cased_score])\n uncased_score_history.append([current_iteration + 1, uncased_score])\n\n stats = ({\n \"loss\": train_loss\n } if history is None else misc.build_stats(history, callbacks))\n if uncased_score and cased_score:\n stats[\"bleu_uncased\"] = uncased_score\n stats[\"bleu_cased\"] = cased_score\n stats[\"bleu_uncased_history\"] = uncased_score_history\n stats[\"bleu_cased_history\"] = cased_score_history\n return stats\n\n def eval(self):\n \"\"\"Evaluates the model.\"\"\"\n distribution_strategy = self.distribution_strategy if self.use_tpu else None\n\n # We only want to create the model under DS scope for TPU case.\n # When 'distribution_strategy' is None, a no-op DummyContextManager will\n # be used.\n with distribution_utils.get_strategy_scope(distribution_strategy):\n if not self.predict_model:\n self.predict_model = transformer.create_model(self.params, False)\n self._load_weights_if_possible(\n self.predict_model,\n tf.train.latest_checkpoint(self.flags_obj.model_dir))\n self.predict_model.summary()\n return evaluate_and_log_bleu(\n self.predict_model, self.params, self.flags_obj.bleu_source,\n self.flags_obj.bleu_ref, self.flags_obj.vocab_file,\n distribution_strategy)\n\n def predict(self):\n \"\"\"Predicts result from the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n\n with tf.name_scope(\"model\"):\n model = transformer.create_model(params, is_train=False)\n self._load_weights_if_possible(\n model, tf.train.latest_checkpoint(self.flags_obj.model_dir))\n model.summary()\n subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)\n\n ds = data_pipeline.eval_input_fn(params)\n ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)\n ret = model.predict(ds)\n val_outputs, _ = ret\n length = len(val_outputs)\n for i in range(length):\n translate.translate_from_input(val_outputs[i], subtokenizer)\n\n def _create_callbacks(self, cur_log_dir, init_steps, params):\n \"\"\"Creates a list of callbacks.\"\"\"\n sfunc = optimizer.LearningRateFn(params[\"learning_rate\"],\n params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)\n callbacks = misc.get_callbacks(params[\"steps_between_evals\"])\n callbacks.append(scheduler_callback)\n ckpt_full_path = os.path.join(cur_log_dir, \"cp-{epoch:04d}.ckpt\")\n callbacks.append(\n tf.keras.callbacks.ModelCheckpoint(\n ckpt_full_path, save_weights_only=True))\n return callbacks\n\n def _load_weights_if_possible(self, model, init_weight_path=None):\n \"\"\"Loads model weights when it is provided.\"\"\"\n if init_weight_path:\n logging.info(\"Load weights: {}\".format(init_weight_path))\n # TODO(b/139414977): Having the same variable restoring method for both\n # TPU and GPU.\n if self.use_tpu:\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=self._create_optimizer())\n checkpoint.restore(init_weight_path)\n else:\n model.load_weights(init_weight_path)\n else:\n logging.info(\"Weights not loaded from path:{}\".format(init_weight_path))\n\n def _create_optimizer(self):\n \"\"\"Creates optimizer.\"\"\"\n 
params = self.params\n # TODO(b/139414679): Explore the difference between using\n # LearningRateSchedule and callback for GPU runs, and try to merge them.\n lr_schedule = optimizer.LearningRateSchedule(\n params[\"learning_rate\"], params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n opt = tf.keras.optimizers.Adam(\n lr_schedule if self.use_tpu else params[\"learning_rate\"],\n params[\"optimizer_adam_beta1\"],\n params[\"optimizer_adam_beta2\"],\n epsilon=params[\"optimizer_adam_epsilon\"])\n\n if params[\"dtype\"] == tf.float16:\n opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n opt,\n loss_scale=flags_core.get_loss_scale(\n self.flags_obj, default_for_fp16=\"dynamic\"))\n if self.flags_obj.fp16_implementation == \"graph_rewrite\":\n # Note: when flags_obj.fp16_implementation == \"graph_rewrite\", dtype as\n # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'\n # which will ensure tf.compat.v2.keras.mixed_precision and\n # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double\n # up.\n opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)\n\n return opt\n\n\ndef _ensure_dir(log_dir):\n \"\"\"Makes log dir if not existed.\"\"\"\n if not tf.io.gfile.exists(log_dir):\n tf.io.gfile.makedirs(log_dir)\n\n\ndef main(_):\n flags_obj = flags.FLAGS\n with logger.benchmark_context(flags_obj):\n task = TransformerTask(flags_obj)\n\n # Execute flag override logic for better model performance\n if flags_obj.tf_gpu_thread_mode:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n num_gpus=flags_obj.num_gpus,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n\n if flags_obj.mode == \"train\":\n task.train()\n elif flags_obj.mode == \"predict\":\n task.predict()\n elif flags_obj.mode == \"eval\":\n task.eval()\n else:\n raise ValueError(\"Invalid mode {}\".format(flags_obj.mode))\n\n\nif __name__ == \"__main__\":\n tf.compat.v1.enable_v2_behavior()\n logging.set_verbosity(logging.INFO)\n misc.define_transformer_flags()\n app.run(main)\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Regression using the DNNRegressor Estimator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\n\nimport tensorflow as tf\n\nimport automobile_data\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', default=100, type=int, help='batch size')\nparser.add_argument('--train_steps', default=1000, type=int,\n help='number of training steps')\nparser.add_argument('--price_norm_factor', default=1000., type=float,\n help='price normalization factor')\n\ndef my_dnn_regression_fn(features, labels, mode, params):\n \"\"\"A model function implementing DNN regression for a custom Estimator.\"\"\"\n\n # Extract the input into a dense layer, according to the feature_columns.\n top = tf.feature_column.input_layer(features, params[\"feature_columns\"])\n\n # Iterate over the \"hidden_units\" list of layer sizes, default is [20].\n for units in params.get(\"hidden_units\", [20]):\n # Add a hidden layer, densely connected on top of the previous layer.\n top = tf.layers.dense(inputs=top, units=units, activation=tf.nn.relu)\n\n # Connect a linear output layer on top.\n output_layer = tf.layers.dense(inputs=top, units=1)\n\n # Reshape the output layer to a 1-dim Tensor to return predictions\n predictions = tf.squeeze(output_layer, 1)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n # In `PREDICT` mode we only need to return predictions.\n return tf.estimator.EstimatorSpec(\n mode=mode, predictions={\"price\": predictions})\n\n # Calculate loss using mean squared error\n average_loss = tf.losses.mean_squared_error(labels, predictions)\n\n # Pre-made estimators use the total_loss instead of the average,\n # so report total_loss for compatibility.\n batch_size = tf.shape(labels)[0]\n total_loss = tf.to_float(batch_size) * average_loss\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = params.get(\"optimizer\", tf.train.AdamOptimizer)\n optimizer = optimizer(params.get(\"learning_rate\", None))\n train_op = optimizer.minimize(\n loss=average_loss, global_step=tf.train.get_global_step())\n\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op)\n\n # In evaluation mode we will calculate evaluation metrics.\n assert mode == tf.estimator.ModeKeys.EVAL\n\n # Calculate root mean squared error\n print(labels)\n print(predictions)\n\n # Fixed for #4083\n predictions = tf.cast(predictions, tf.float64)\n\n rmse = tf.metrics.root_mean_squared_error(labels, predictions)\n\n # Add the rmse to the collection of evaluation metrics.\n eval_metrics = {\"rmse\": rmse}\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n # Report sum of error for compatibility with pre-made estimators\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n\ndef main(argv):\n \"\"\"Builds, trains, and evaluates the model.\"\"\"\n args = 
parser.parse_args(argv[1:])\n\n (train_x,train_y), (test_x, test_y) = automobile_data.load_data()\n\n train_y /= args.price_norm_factor\n test_y /= args.price_norm_factor\n\n # Provide the training input dataset.\n train_input_fn = automobile_data.make_dataset(args.batch_size, train_x, train_y, True, 1000)\n\n # Build the validation dataset.\n test_input_fn = automobile_data.make_dataset(args.batch_size, test_x, test_y)\n\n # The first way assigns a unique weight to each category. To do this you must\n # specify the category's vocabulary (values outside this specification will\n # receive a weight of zero). Here we specify the vocabulary using a list of\n # options. The vocabulary can also be specified with a vocabulary file (using\n # `categorical_column_with_vocabulary_file`). For features covering a\n # range of positive integers use `categorical_column_with_identity`.\n body_style_vocab = [\"hardtop\", \"wagon\", \"sedan\", \"hatchback\", \"convertible\"]\n body_style = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"body-style\", vocabulary_list=body_style_vocab)\n make = tf.feature_column.categorical_column_with_hash_bucket(\n key=\"make\", hash_bucket_size=50)\n\n feature_columns = [\n tf.feature_column.numeric_column(key=\"curb-weight\"),\n tf.feature_column.numeric_column(key=\"highway-mpg\"),\n # Since this is a DNN model, convert categorical columns from sparse\n # to dense.\n # Wrap them in an `indicator_column` to create a\n # one-hot vector from the input.\n tf.feature_column.indicator_column(body_style),\n # Or use an `embedding_column` to create a trainable vector for each\n # index.\n tf.feature_column.embedding_column(make, dimension=3),\n ]\n\n # Build a custom Estimator, using the model_fn.\n # `params` is passed through to the `model_fn`.\n model = tf.estimator.Estimator(\n model_fn=my_dnn_regression_fn,\n params={\n \"feature_columns\": feature_columns,\n \"learning_rate\": 0.001,\n \"optimizer\": tf.train.AdamOptimizer,\n \"hidden_units\": [20, 20]\n })\n\n # Train the model.\n model.train(input_fn=train_input_fn, steps=args.train_steps)\n\n # Evaluate how the model performs on data it has not yet seen.\n eval_result = model.evaluate(input_fn=test_input_fn)\n\n # Print the Root Mean Square Error (RMSE).\n print(\"\\n\" + 80 * \"*\")\n print(\"\\nRMS error for the test set: ${:.0f}\"\n .format(args.price_norm_factor * eval_result[\"rmse\"]))\n\n print()\n\n\nif __name__ == \"__main__\":\n # The Estimator periodically generates \"INFO\" logs; make these logs visible.\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main=main)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementation of embedding layer with shared weights.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom official.r1.utils import tpu as tpu_utils\n\n\nclass EmbeddingSharedWeights(tf.layers.Layer):\n \"\"\"Calculates input embeddings and pre-softmax linear with shared weights.\"\"\"\n\n def __init__(self, vocab_size, hidden_size, method=\"gather\"):\n \"\"\"Specify characteristic parameters of embedding layer.\n\n Args:\n vocab_size: Number of tokens in the embedding. (Typically ~32,000)\n hidden_size: Dimensionality of the embedding. (Typically 512 or 1024)\n method: Strategy for performing embedding lookup. \"gather\" uses tf.gather\n which performs well on CPUs and GPUs, but very poorly on TPUs. \"matmul\"\n one-hot encodes the indicies and formulates the embedding as a sparse\n matrix multiplication. The matmul formulation is wasteful as it does\n extra work, however matrix multiplication is very fast on TPUs which\n makes \"matmul\" considerably faster than \"gather\" on TPUs.\n \"\"\"\n super(EmbeddingSharedWeights, self).__init__()\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n if method not in (\"gather\", \"matmul\"):\n raise ValueError(\"method {} must be 'gather' or 'matmul'\".format(method))\n self.method = method\n\n def build(self, _):\n with tf.variable_scope(\"embedding_and_softmax\", reuse=tf.AUTO_REUSE):\n # Create and initialize weights. 
The random normal initializer was chosen\n # randomly, and works well.\n self.shared_weights = tf.get_variable(\n \"weights\", [self.vocab_size, self.hidden_size],\n initializer=tf.random_normal_initializer(\n 0., self.hidden_size ** -0.5))\n\n self.built = True\n\n def call(self, x):\n \"\"\"Get token embeddings of x.\n\n Args:\n x: An int64 tensor with shape [batch_size, length]\n Returns:\n embeddings: float32 tensor with shape [batch_size, length, embedding_size]\n padding: float32 tensor with shape [batch_size, length] indicating the\n locations of the padding tokens in x.\n \"\"\"\n with tf.name_scope(\"embedding\"):\n # Create binary mask of size [batch_size, length]\n mask = tf.to_float(tf.not_equal(x, 0))\n\n if self.method == \"gather\":\n embeddings = tf.gather(self.shared_weights, x)\n embeddings *= tf.expand_dims(mask, -1)\n else: # matmul\n embeddings = tpu_utils.embedding_matmul(\n embedding_table=self.shared_weights,\n values=tf.cast(x, dtype=tf.int32),\n mask=mask\n )\n # embedding_matmul already zeros out masked positions, so\n # `embeddings *= tf.expand_dims(mask, -1)` is unnecessary.\n\n\n # Scale embedding by the sqrt of the hidden size\n embeddings *= self.hidden_size ** 0.5\n\n return embeddings\n\n\n def linear(self, x):\n \"\"\"Computes logits by running x through a linear layer.\n\n Args:\n x: A float32 tensor with shape [batch_size, length, hidden_size]\n Returns:\n float32 tensor with shape [batch_size, length, vocab_size].\n \"\"\"\n with tf.name_scope(\"presoftmax_linear\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[1]\n\n x = tf.reshape(x, [-1, self.hidden_size])\n logits = tf.matmul(x, self.shared_weights, transpose_b=True)\n\n return tf.reshape(logits, [batch_size, length, self.vocab_size])\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Networks for GAN compression example using TFGAN.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom slim.nets import dcgan\nfrom slim.nets import pix2pix\n\n\ndef _last_conv_layer(end_points):\n \"\"\"\"Returns the last convolutional layer from an endpoints dictionary.\"\"\"\n conv_list = [k if k[:4] == 'conv' else None for k in end_points.keys()]\n conv_list.sort()\n return end_points[conv_list[-1]]\n\n\ndef _encoder(img_batch, is_training=True, bits=64, depth=64):\n \"\"\"Maps images to internal representation.\n\n Args:\n img_batch: Stuff\n is_training: Stuff\n bits: Number of bits per patch.\n depth: Stuff\n\n Returns:\n Real-valued 2D Tensor of size [batch_size, bits].\n \"\"\"\n _, end_points = dcgan.discriminator(\n img_batch, depth=depth, is_training=is_training, scope='Encoder')\n\n # (joelshor): Make the DCGAN convolutional layer that converts to logits\n # not trainable, since it doesn't affect the encoder output.\n\n # Get the pre-logit layer, which is the last conv.\n net = _last_conv_layer(end_points)\n\n # Transform the features to the proper number of bits.\n with tf.variable_scope('EncoderTransformer'):\n encoded = tf.contrib.layers.conv2d(net, bits, kernel_size=1, stride=1,\n padding='VALID', normalizer_fn=None,\n activation_fn=None)\n encoded = tf.squeeze(encoded, [1, 2])\n encoded.shape.assert_has_rank(2)\n\n # Map encoded to the range [-1, 1].\n return tf.nn.softsign(encoded)\n\n\ndef _binarizer(prebinary_codes, is_training):\n \"\"\"Binarize compression logits.\n\n During training, add noise, as in https://arxiv.org/pdf/1611.01704.pdf. During\n eval, map [-1, 1] -> {-1, 1}.\n\n Args:\n prebinary_codes: Floating-point tensors corresponding to pre-binary codes.\n Shape is [batch, code_length].\n is_training: A python bool. If True, add noise. If false, binarize.\n\n Returns:\n Binarized codes. Shape is [batch, code_length].\n\n Raises:\n ValueError: If the shape of `prebinary_codes` isn't static.\n \"\"\"\n if is_training:\n # In order to train codes that can be binarized during eval, we add noise as\n # in https://arxiv.org/pdf/1611.01704.pdf. 
Another option is to use a\n # stochastic node, as in https://arxiv.org/abs/1608.05148.\n noise = tf.random_uniform(\n prebinary_codes.shape,\n minval=-1.0,\n maxval=1.0)\n return prebinary_codes + noise\n else:\n return tf.sign(prebinary_codes)\n\n\ndef _decoder(codes, final_size, is_training, depth=64):\n \"\"\"Compression decoder.\"\"\"\n decoded_img, _ = dcgan.generator(\n codes,\n depth=depth,\n final_size=final_size,\n num_outputs=3,\n is_training=is_training,\n scope='Decoder')\n\n # Map output to [-1, 1].\n # Use softsign instead of tanh, as per empirical results of\n # http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.\n return tf.nn.softsign(decoded_img)\n\n\ndef _validate_image_inputs(image_batch):\n image_batch.shape.assert_has_rank(4)\n image_batch.shape[1:].assert_is_fully_defined()\n\n\ndef compression_model(image_batch, num_bits=64, depth=64, is_training=True):\n \"\"\"Image compression model.\n\n Args:\n image_batch: A batch of images to compress and reconstruct. Images should\n be normalized already. Shape is [batch, height, width, channels].\n num_bits: Desired number of bits per image in the compressed representation.\n depth: The base number of filters for the encoder and decoder networks.\n is_training: A python bool. If False, run in evaluation mode.\n\n Returns:\n uncompressed images, binary codes, prebinary codes\n \"\"\"\n image_batch = tf.convert_to_tensor(image_batch)\n _validate_image_inputs(image_batch)\n final_size = image_batch.shape.as_list()[1]\n\n prebinary_codes = _encoder(image_batch, is_training, num_bits, depth)\n binary_codes = _binarizer(prebinary_codes, is_training)\n uncompressed_imgs = _decoder(binary_codes, final_size, is_training, depth)\n return uncompressed_imgs, binary_codes, prebinary_codes\n\n\ndef discriminator(image_batch, unused_conditioning=None, depth=64):\n \"\"\"A thin wrapper around the pix2pix discriminator to conform to TFGAN API.\"\"\"\n logits, _ = pix2pix.pix2pix_discriminator(\n image_batch, num_filters=[depth, 2 * depth, 4 * depth, 8 * depth])\n return tf.layers.flatten(logits)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Runs a ResNet model on the ImageNet dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.benchmark.models import trivial_model\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import keras_utils\nfrom official.utils.misc import model_helpers\nfrom official.vision.image_classification import common\nfrom official.vision.image_classification import imagenet_preprocessing\nfrom official.vision.image_classification import resnet_model\n\n\ndef run(flags_obj):\n \"\"\"Run ResNet ImageNet training and eval loop using native Keras APIs.\n\n Args:\n flags_obj: An object containing parsed flag values.\n\n Raises:\n ValueError: If fp16 is passed as it is not currently supported.\n\n Returns:\n Dictionary of training and eval stats.\n \"\"\"\n keras_utils.set_session_config(\n enable_eager=flags_obj.enable_eager,\n enable_xla=flags_obj.enable_xla)\n\n # Execute flag override logic for better model performance\n if flags_obj.tf_gpu_thread_mode:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n num_gpus=flags_obj.num_gpus,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n common.set_cudnn_batchnorm_mode()\n\n dtype = flags_core.get_tf_dtype(flags_obj)\n if dtype == tf.float16:\n loss_scale = flags_core.get_loss_scale(flags_obj, default_for_fp16=128)\n policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(\n 'mixed_float16', loss_scale=loss_scale)\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)\n if not keras_utils.is_v2_0():\n raise ValueError('--dtype=fp16 is not supported in TensorFlow 1.')\n elif dtype == tf.bfloat16:\n policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(\n 'mixed_bfloat16')\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)\n\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n tf.keras.backend.set_image_data_format(data_format)\n\n # Configures cluster spec for distribution strategy.\n num_workers = distribution_utils.configure_cluster(flags_obj.worker_hosts,\n flags_obj.task_index)\n\n strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=flags_obj.num_gpus,\n num_workers=num_workers,\n all_reduce_alg=flags_obj.all_reduce_alg,\n num_packs=flags_obj.num_packs,\n tpu_address=flags_obj.tpu)\n\n if strategy:\n # flags_obj.enable_get_next_as_optional 
controls whether enabling\n # get_next_as_optional behavior in DistributedIterator. If true, last\n # partial batch can be supported.\n strategy.extended.experimental_enable_get_next_as_optional = (\n flags_obj.enable_get_next_as_optional\n )\n\n strategy_scope = distribution_utils.get_strategy_scope(strategy)\n\n # pylint: disable=protected-access\n if flags_obj.use_synthetic_data:\n distribution_utils.set_up_synthetic_data()\n input_fn = common.get_synth_input_fn(\n height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,\n width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,\n num_channels=imagenet_preprocessing.NUM_CHANNELS,\n num_classes=imagenet_preprocessing.NUM_CLASSES,\n dtype=dtype,\n drop_remainder=True)\n else:\n distribution_utils.undo_set_up_synthetic_data()\n input_fn = imagenet_preprocessing.input_fn\n\n # When `enable_xla` is True, we always drop the remainder of the batches\n # in the dataset, as XLA-GPU doesn't support dynamic shapes.\n drop_remainder = flags_obj.enable_xla\n\n train_input_dataset = input_fn(\n is_training=True,\n data_dir=flags_obj.data_dir,\n batch_size=flags_obj.batch_size,\n num_epochs=flags_obj.train_epochs,\n parse_record_fn=imagenet_preprocessing.parse_record,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads,\n dtype=dtype,\n drop_remainder=drop_remainder,\n tf_data_experimental_slack=flags_obj.tf_data_experimental_slack,\n training_dataset_cache=flags_obj.training_dataset_cache,\n )\n\n eval_input_dataset = None\n if not flags_obj.skip_eval:\n eval_input_dataset = input_fn(\n is_training=False,\n data_dir=flags_obj.data_dir,\n batch_size=flags_obj.batch_size,\n num_epochs=flags_obj.train_epochs,\n parse_record_fn=imagenet_preprocessing.parse_record,\n dtype=dtype,\n drop_remainder=drop_remainder)\n\n lr_schedule = 0.1\n if flags_obj.use_tensor_lr:\n lr_schedule = common.PiecewiseConstantDecayWithWarmup(\n batch_size=flags_obj.batch_size,\n epoch_size=imagenet_preprocessing.NUM_IMAGES['train'],\n warmup_epochs=common.LR_SCHEDULE[0][1],\n boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),\n multipliers=list(p[0] for p in common.LR_SCHEDULE),\n compute_lr_on_cpu=True)\n\n with strategy_scope:\n optimizer = common.get_optimizer(lr_schedule)\n if flags_obj.fp16_implementation == 'graph_rewrite':\n # Note: when flags_obj.fp16_implementation == \"graph_rewrite\", dtype as\n # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'\n # which will ensure tf.compat.v2.keras.mixed_precision and\n # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double\n # up.\n optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(\n optimizer)\n\n # TODO(hongkuny): Remove trivial model usage and move it to benchmark.\n if flags_obj.use_trivial_model:\n model = trivial_model.trivial_model(\n imagenet_preprocessing.NUM_CLASSES)\n else:\n model = resnet_model.resnet50(\n num_classes=imagenet_preprocessing.NUM_CLASSES)\n\n # TODO(b/138957587): Remove when force_v2_in_keras_compile is on longer\n # a valid arg for this model. 
Also remove as a valid flag.\n if flags_obj.force_v2_in_keras_compile is not None:\n model.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=optimizer,\n metrics=(['sparse_categorical_accuracy']\n if flags_obj.report_accuracy_metrics else None),\n run_eagerly=flags_obj.run_eagerly,\n experimental_run_tf_function=flags_obj.force_v2_in_keras_compile)\n else:\n model.compile(\n loss='sparse_categorical_crossentropy',\n optimizer=optimizer,\n metrics=(['sparse_categorical_accuracy']\n if flags_obj.report_accuracy_metrics else None),\n run_eagerly=flags_obj.run_eagerly)\n\n steps_per_epoch = (\n imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)\n train_epochs = flags_obj.train_epochs\n\n callbacks = common.get_callbacks(steps_per_epoch,\n common.learning_rate_schedule)\n if flags_obj.enable_checkpoint_and_export:\n ckpt_full_path = os.path.join(flags_obj.model_dir, 'model.ckpt-{epoch:04d}')\n callbacks.append(tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,\n save_weights_only=True))\n\n # if mutliple epochs, ignore the train_steps flag.\n if train_epochs <= 1 and flags_obj.train_steps:\n steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)\n train_epochs = 1\n\n num_eval_steps = (\n imagenet_preprocessing.NUM_IMAGES['validation'] // flags_obj.batch_size)\n\n validation_data = eval_input_dataset\n if flags_obj.skip_eval:\n # Only build the training graph. This reduces memory usage introduced by\n # control flow ops in layers that have different implementations for\n # training and inference (e.g., batch norm).\n if flags_obj.set_learning_phase_to_train:\n # TODO(haoyuzhang): Understand slowdown of setting learning phase when\n # not using distribution strategy.\n tf.keras.backend.set_learning_phase(1)\n num_eval_steps = None\n validation_data = None\n\n if not strategy and flags_obj.explicit_gpu_placement:\n # TODO(b/135607227): Add device scope automatically in Keras training loop\n # when not using distribition strategy.\n no_dist_strat_device = tf.device('/device:GPU:0')\n no_dist_strat_device.__enter__()\n\n history = model.fit(train_input_dataset,\n epochs=train_epochs,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks,\n validation_steps=num_eval_steps,\n validation_data=validation_data,\n validation_freq=flags_obj.epochs_between_evals,\n verbose=2)\n if flags_obj.enable_checkpoint_and_export:\n if dtype == tf.bfloat16:\n logging.warning(\"Keras model.save does not support bfloat16 dtype.\")\n else:\n # Keras model.save assumes a float32 input designature.\n export_path = os.path.join(flags_obj.model_dir, 'saved_model')\n model.save(export_path, include_optimizer=False)\n\n eval_output = None\n if not flags_obj.skip_eval:\n eval_output = model.evaluate(eval_input_dataset,\n steps=num_eval_steps,\n verbose=2)\n\n if not strategy and flags_obj.explicit_gpu_placement:\n no_dist_strat_device.__exit__()\n\n stats = common.build_stats(history, eval_output, callbacks)\n return stats\n\n\ndef define_imagenet_keras_flags():\n common.define_keras_flags()\n flags_core.set_defaults()\n flags.adopt_module_key_flags(common)\n\n\ndef main(_):\n model_helpers.apply_clean(flags.FLAGS)\n with logger.benchmark_context(flags.FLAGS):\n stats = run(flags.FLAGS)\n logging.info('Run stats:\\n%s', stats)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n define_imagenet_keras_flags()\n app.run(main)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for models.lstm_ssd_mobilenet_v1_feature_extractor.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extactor\nfrom object_detection.models import ssd_feature_extractor_test\n\nslim = tf.contrib.slim\n\n\nclass LstmSsdMobilenetV1FeatureExtractorTest(\n ssd_feature_extractor_test.SsdFeatureExtractorTestBase):\n\n def _create_feature_extractor(self,\n depth_multiplier=1.0,\n pad_to_multiple=1,\n is_training=True,\n use_explicit_padding=False):\n \"\"\"Constructs a new feature extractor.\n\n Args:\n depth_multiplier: A float depth multiplier for feature extractor.\n pad_to_multiple: The nearest multiple to zero pad the input height and\n width dimensions to.\n is_training: A boolean whether the network is in training mode.\n use_explicit_padding: A boolean whether to use explicit padding.\n\n Returns:\n An lstm_ssd_meta_arch.LSTMSSDMobileNetV1FeatureExtractor object.\n \"\"\"\n min_depth = 32\n extractor = (\n feature_extactor.LSTMSSDMobileNetV1FeatureExtractor(\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n self.conv_hyperparams_fn,\n use_explicit_padding=use_explicit_padding))\n extractor.lstm_state_depth = int(256 * depth_multiplier)\n return extractor\n\n def test_extract_features_returns_correct_shapes_256(self):\n image_height = 256\n image_width = 256\n depth_multiplier = 1.0\n pad_to_multiple = 1\n batch_size = 5\n expected_feature_map_shape = [(batch_size, 8, 8, 256), (batch_size, 4, 4,\n 512),\n (batch_size, 2, 2, 256), (batch_size, 1, 1,\n 256)]\n self.check_extract_features_returns_correct_shape(\n batch_size,\n image_height,\n image_width,\n depth_multiplier,\n pad_to_multiple,\n expected_feature_map_shape,\n use_explicit_padding=False)\n self.check_extract_features_returns_correct_shape(\n batch_size,\n image_height,\n image_width,\n depth_multiplier,\n pad_to_multiple,\n expected_feature_map_shape,\n use_explicit_padding=True)\n\n def test_preprocess_returns_correct_value_range(self):\n test_image = np.random.rand(5, 128, 128, 3)\n feature_extractor = self._create_feature_extractor()\n preprocessed_image = feature_extractor.preprocess(test_image)\n self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))\n\n def test_variables_only_created_in_scope(self):\n scope_name = 'MobilenetV1'\n g = tf.Graph()\n with g.as_default():\n preprocessed_inputs = tf.placeholder(tf.float32, (5, 256, 256, 3))\n feature_extractor = self._create_feature_extractor()\n feature_extractor.extract_features(preprocessed_inputs)\n variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n find_scope = False\n for variable in variables:\n if scope_name in variable.name:\n find_scope = True\n break\n self.assertTrue(find_scope)\n\n def test_lstm_non_zero_state(self):\n init_state = {\n 'lstm_state_c': 
tf.zeros([8, 8, 256]),\n 'lstm_state_h': tf.zeros([8, 8, 256]),\n 'lstm_state_step': tf.zeros([1])\n }\n seq = {'test': tf.random_uniform([3, 1, 1, 1])}\n stateful_reader = tf.contrib.training.SequenceQueueingStateSaver(\n batch_size=1,\n num_unroll=1,\n input_length=2,\n input_key='',\n input_sequences=seq,\n input_context={},\n initial_states=init_state,\n capacity=1)\n feature_extractor = self._create_feature_extractor()\n image = tf.random_uniform([5, 256, 256, 3])\n with tf.variable_scope('zero_state'):\n feature_map = feature_extractor.extract_features(\n image, stateful_reader.next_batch)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run([stateful_reader.prefetch_op])\n _ = sess.run([feature_map])\n # Update states with the next batch.\n state = sess.run(stateful_reader.next_batch.state('lstm_state_c'))\n # State should no longer be zero after update.\n self.assertTrue(state.any())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition for Inflated 3D Inception V1 (I3D).\n\nThe network architecture is proposed by:\n Joao Carreira and Andrew Zisserman,\n Quo Vadis, Action Recognition? 
A New Model and the Kinetics Dataset.\n https://arxiv.org/abs/1705.07750\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom nets import i3d_utils\nfrom nets import s3dg\n\nslim = contrib_slim\ntrunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\nconv3d_spatiotemporal = i3d_utils.conv3d_spatiotemporal\n\n\ndef i3d_arg_scope(weight_decay=1e-7,\n batch_norm_decay=0.999,\n batch_norm_epsilon=0.001,\n use_renorm=False,\n separable_conv3d=False):\n \"\"\"Defines default arg_scope for I3D.\n\n Args:\n weight_decay: The weight decay to use for regularizing the model.\n batch_norm_decay: Decay for batch norm moving average.\n batch_norm_epsilon: Small float added to variance to avoid dividing by zero\n in batch norm.\n use_renorm: Whether to use batch renormalization or not.\n separable_conv3d: Whether to use separable 3d Convs.\n\n Returns:\n sc: An arg_scope to use for the models.\n \"\"\"\n batch_norm_params = {\n # Decay for the moving averages.\n 'decay': batch_norm_decay,\n # epsilon to prevent 0s in variance.\n 'epsilon': batch_norm_epsilon,\n # Turns off fused batch norm.\n 'fused': False,\n 'renorm': use_renorm,\n # collection containing the moving mean and moving variance.\n 'variables_collections': {\n 'beta': None,\n 'gamma': None,\n 'moving_mean': ['moving_vars'],\n 'moving_variance': ['moving_vars'],\n }\n }\n\n with slim.arg_scope(\n [slim.conv3d, conv3d_spatiotemporal],\n weights_regularizer=slim.l2_regularizer(weight_decay),\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n with slim.arg_scope(\n [conv3d_spatiotemporal], separable=separable_conv3d) as sc:\n return sc\n\n\ndef i3d_base(inputs, final_endpoint='Mixed_5c',\n scope='InceptionV1'):\n \"\"\"Defines the I3D base architecture.\n\n Note that we use the names as defined in Inception V1 to facilitate checkpoint\n conversion from an image-trained Inception V1 checkpoint to I3D checkpoint.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n final_endpoint: Specifies the endpoint to construct the network up to. 
It\n can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']\n scope: Optional variable_scope.\n\n Returns:\n A dictionary from components of the network to the corresponding activation.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values.\n \"\"\"\n\n return s3dg.s3dg_base(\n inputs,\n first_temporal_kernel_size=7,\n temporal_conv_startat='Conv2d_2c_3x3',\n gating_startat=None,\n final_endpoint=final_endpoint,\n min_depth=16,\n depth_multiplier=1.0,\n data_format='NDHWC',\n scope=scope)\n\n\ndef i3d(inputs,\n num_classes=1000,\n dropout_keep_prob=0.8,\n is_training=True,\n prediction_fn=slim.softmax,\n spatial_squeeze=True,\n reuse=None,\n scope='InceptionV1'):\n \"\"\"Defines the I3D architecture.\n\n The default image size used to train this network is 224x224.\n\n Args:\n inputs: A 5-D float tensor of size [batch_size, num_frames, height, width,\n channels].\n num_classes: number of predicted classes.\n dropout_keep_prob: the percentage of activation values that are retained.\n is_training: whether is training or not.\n prediction_fn: a function to get predictions out of logits.\n spatial_squeeze: if True, logits is of shape is [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n logits: the pre-softmax activations, a tensor of size\n [batch_size, num_classes]\n end_points: a dictionary from components of the network to the corresponding\n activation.\n \"\"\"\n # Final pooling and prediction\n with tf.variable_scope(\n scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:\n with slim.arg_scope(\n [slim.batch_norm, slim.dropout], is_training=is_training):\n net, end_points = i3d_base(inputs, scope=scope)\n with tf.variable_scope('Logits'):\n kernel_size = i3d_utils.reduced_kernel_size_3d(net, [2, 7, 7])\n net = slim.avg_pool3d(\n net, kernel_size, stride=1, scope='AvgPool_0a_7x7')\n net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')\n logits = slim.conv3d(\n net,\n num_classes, [1, 1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='Conv2d_0c_1x1')\n # Temporal average pooling.\n logits = tf.reduce_mean(logits, axis=1)\n if spatial_squeeze:\n logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')\n\n end_points['Logits'] = logits\n end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return logits, end_points\n\n\ni3d.default_image_size = 224\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lstm_object_detection.lstm.lstm_cells.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom lstm_object_detection.lstm import lstm_cells\n\n\nclass BottleneckConvLstmCellsTest(tf.test.TestCase):\n\n def test_run_lstm_cell(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units)\n init_state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n output, state_tuple = cell(inputs, init_state)\n self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list())\n\n def test_run_lstm_cell_with_flattened_state(self):\n filter_size = [3, 3]\n output_dim = 10\n output_size = [output_dim] * 2\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n flatten_state=True)\n init_state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n output, state_tuple = cell(inputs, init_state)\n self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())\n self.assertAllEqual([4, 1500], state_tuple[0].shape.as_list())\n self.assertAllEqual([4, 1500], state_tuple[1].shape.as_list())\n\n def test_run_lstm_cell_with_output_bottleneck(self):\n filter_size = [3, 3]\n output_dim = 10\n output_size = [output_dim] * 2\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n output_bottleneck=True)\n init_state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n output, state_tuple = cell(inputs, init_state)\n self.assertAllEqual([4, 10, 10, 30], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state_tuple[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state_tuple[1].shape.as_list())\n\n def test_get_init_state(self):\n filter_size = [3, 3]\n output_dim = 10\n output_size = [output_dim] * 2\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n cell = lstm_cells.BottleneckConvLSTMCell(\n 
filter_size=filter_size,\n output_size=output_size,\n num_units=num_units)\n init_c, init_h = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n\n self.assertEqual(tf.float32, init_c.dtype)\n self.assertEqual(tf.float32, init_h.dtype)\n with self.test_session() as sess:\n init_c_res, init_h_res = sess.run([init_c, init_h])\n self.assertAllClose(np.zeros((4, 10, 10, 15)), init_c_res)\n self.assertAllClose(np.zeros((4, 10, 10, 15)), init_h_res)\n\n def test_get_init_learned_state(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = True\n\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units)\n init_c, init_h = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n\n self.assertEqual(tf.float32, init_c.dtype)\n self.assertEqual(tf.float32, init_h.dtype)\n self.assertAllEqual([4, 10, 10, 15], init_c.shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], init_h.shape.as_list())\n\n def test_unroll(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n output, state = cell(inputs, state)\n self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list())\n\n def test_prebottleneck(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)\n inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n pre_bottleneck=True)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n if step % 2 == 0:\n inputs = cell.pre_bottleneck(inputs_large, state[1], 0)\n else:\n inputs = cell.pre_bottleneck(inputs_small, state[1], 1)\n output, state = cell(inputs, state)\n self.assertAllEqual([4, 10, 10, 15], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 15], state[1].shape.as_list())\n\n def test_flatten_state(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 15\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)\n inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.BottleneckConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n pre_bottleneck=True,\n flatten_state=True)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n if step % 2 == 0:\n inputs = cell.pre_bottleneck(inputs_large, state[1], 0)\n else:\n inputs = cell.pre_bottleneck(inputs_small, state[1], 1)\n output, state = cell(inputs, state)\n with self.test_session() as 
sess:\n sess.run(tf.global_variables_initializer())\n output_result, state_result = sess.run([output, state])\n self.assertAllEqual((4, 10, 10, 15), output_result.shape)\n self.assertAllEqual((4, 10*10*15), state_result[0].shape)\n self.assertAllEqual((4, 10*10*15), state_result[1].shape)\n\n\nclass GroupedConvLstmCellsTest(tf.test.TestCase):\n\n def test_run_lstm_cell(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True)\n init_state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n output, state_tuple = cell(inputs, init_state)\n self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list())\n\n def test_run_lstm_cell_with_output_bottleneck(self):\n filter_size = [3, 3]\n output_dim = 10\n output_size = [output_dim] * 2\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n inputs = tf.zeros([batch_size, output_dim, output_dim, 3], dtype=tf.float32)\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True,\n output_bottleneck=True)\n init_state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n output, state_tuple = cell(inputs, init_state)\n self.assertAllEqual([4, 10, 10, 32], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state_tuple[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state_tuple[1].shape.as_list())\n\n def test_get_init_state(self):\n filter_size = [3, 3]\n output_dim = 10\n output_size = [output_dim] * 2\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = False\n\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True)\n init_c, init_h = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n\n self.assertEqual(tf.float32, init_c.dtype)\n self.assertEqual(tf.float32, init_h.dtype)\n with self.test_session() as sess:\n init_c_res, init_h_res = sess.run([init_c, init_h])\n self.assertAllClose(np.zeros((4, 10, 10, 16)), init_c_res)\n self.assertAllClose(np.zeros((4, 10, 10, 16)), init_h_res)\n\n def test_get_init_learned_state(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n learned_state = True\n\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True)\n init_c, init_h = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n\n self.assertEqual(tf.float32, init_c.dtype)\n self.assertEqual(tf.float32, init_h.dtype)\n self.assertAllEqual([4, 10, 10, 16], init_c.shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], init_h.shape.as_list())\n\n def test_unroll(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = 
lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n output, state = cell(inputs, state)\n self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list())\n\n def test_prebottleneck(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)\n inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True,\n pre_bottleneck=True)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n if step % 2 == 0:\n inputs = cell.pre_bottleneck(inputs_large, state[1], 0)\n else:\n inputs = cell.pre_bottleneck(inputs_small, state[1], 1)\n output, state = cell(inputs, state)\n self.assertAllEqual([4, 10, 10, 16], output.shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state[0].shape.as_list())\n self.assertAllEqual([4, 10, 10, 16], state[1].shape.as_list())\n\n def test_flatten_state(self):\n filter_size = [3, 3]\n output_size = [10, 10]\n num_units = 16\n state_name = 'lstm_state'\n batch_size = 4\n dtype = tf.float32\n unroll = 10\n learned_state = False\n\n inputs_large = tf.zeros([4, 10, 10, 5], dtype=tf.float32)\n inputs_small = tf.zeros([4, 10, 10, 3], dtype=tf.float32)\n cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=filter_size,\n output_size=output_size,\n num_units=num_units,\n is_training=True,\n pre_bottleneck=True,\n flatten_state=True)\n state = cell.init_state(\n state_name, batch_size, dtype, learned_state)\n for step in range(unroll):\n if step % 2 == 0:\n inputs = cell.pre_bottleneck(inputs_large, state[1], 0)\n else:\n inputs = cell.pre_bottleneck(inputs_small, state[1], 1)\n output, state = cell(inputs, state)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n output_result, state_result = sess.run([output, state])\n self.assertAllEqual((4, 10, 10, 16), output_result.shape)\n self.assertAllEqual((4, 10*10*16), state_result[0].shape)\n self.assertAllEqual((4, 10*10*16), state_result[1].shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"LSTDInterleavedFeatureExtractor which interleaves multiple MobileNet V2.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import ops as tf_ops\nfrom lstm_object_detection.lstm import lstm_cells\nfrom lstm_object_detection.lstm import rnn_decoder\nfrom lstm_object_detection.meta_architectures import lstm_ssd_meta_arch\nfrom lstm_object_detection.models import mobilenet_defs\nfrom object_detection.models import feature_map_generators\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom nets.mobilenet import mobilenet\nfrom nets.mobilenet import mobilenet_v2\n\nslim = tf.contrib.slim\n\n\nclass LSTMSSDInterleavedMobilenetV2FeatureExtractor(\n lstm_ssd_meta_arch.LSTMSSDInterleavedFeatureExtractor):\n \"\"\"LSTM-SSD Interleaved Feature Extractor using MobilenetV2 features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams_fn,\n reuse_weights=None,\n use_explicit_padding=False,\n use_depthwise=True,\n override_base_feature_extractor_hyperparams=False):\n \"\"\"Interleaved Feature Extractor for LSTD Models with MobileNet v2.\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d\n and separable_conv2d ops in the layers that are added on top of the\n base feature extractor.\n reuse_weights: Whether to reuse variables. Default is None.\n use_explicit_padding: Whether to use explicit padding when extracting\n features. Default is False.\n use_depthwise: Whether to use depthwise convolutions. 
Default is True.\n override_base_feature_extractor_hyperparams: Whether to override\n hyperparameters of the base feature extractor with the one from\n `conv_hyperparams_fn`.\n \"\"\"\n super(LSTMSSDInterleavedMobilenetV2FeatureExtractor, self).__init__(\n is_training, depth_multiplier, min_depth, pad_to_multiple,\n conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,\n override_base_feature_extractor_hyperparams)\n # RANDOM_SKIP_SMALL means the training policy is random and the small model\n # does not update state during training.\n if self._is_training:\n self._interleave_method = 'RANDOM_SKIP_SMALL'\n else:\n self._interleave_method = 'SKIP9'\n\n self._flatten_state = False\n self._scale_state = False\n self._clip_state = True\n self._pre_bottleneck = True\n self._feature_map_layout = {\n 'from_layer': ['layer_19', '', '', '', ''],\n 'layer_depth': [-1, 256, 256, 256, 256],\n 'use_depthwise': self._use_depthwise,\n 'use_explicit_padding': self._use_explicit_padding,\n }\n self._low_res = True\n self._base_network_scope = 'MobilenetV2'\n\n def extract_base_features_large(self, preprocessed_inputs):\n \"\"\"Extract the large base model features.\n\n Variables are created under the scope of <scope>/MobilenetV2_1/\n\n Args:\n preprocessed_inputs: preprocessed input images of shape:\n [batch, width, height, depth].\n\n Returns:\n net: the last feature map created from the base feature extractor.\n end_points: a dictionary of feature maps created.\n \"\"\"\n scope_name = self._base_network_scope + '_1'\n with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope:\n net, end_points = mobilenet_v2.mobilenet_base(\n preprocessed_inputs,\n depth_multiplier=self._depth_multipliers[0],\n conv_defs=mobilenet_defs.mobilenet_v2_lite_def(\n is_quantized=self._is_quantized),\n use_explicit_padding=self._use_explicit_padding,\n scope=base_scope)\n return net, end_points\n\n def extract_base_features_small(self, preprocessed_inputs):\n \"\"\"Extract the small base model features.\n\n Variables are created under the scope of <scope>/MobilenetV2_2/\n\n Args:\n preprocessed_inputs: preprocessed input images of shape:\n [batch, width, height, depth].\n\n Returns:\n net: the last feature map created from the base feature extractor.\n end_points: a dictionary of feature maps created.\n \"\"\"\n scope_name = self._base_network_scope + '_2'\n with tf.variable_scope(scope_name, reuse=self._reuse_weights) as base_scope:\n if self._low_res:\n size_small = preprocessed_inputs.get_shape().as_list()[1] / 2\n inputs_small = tf.image.resize_images(preprocessed_inputs,\n [size_small, size_small])\n # Create end point handle for tflite deployment.\n with tf.name_scope(None):\n inputs_small = tf.identity(\n inputs_small, name='normalized_input_image_tensor_small')\n else:\n inputs_small = preprocessed_inputs\n net, end_points = mobilenet_v2.mobilenet_base(\n inputs_small,\n depth_multiplier=self._depth_multipliers[1],\n conv_defs=mobilenet_defs.mobilenet_v2_lite_def(\n is_quantized=self._is_quantized, low_res=self._low_res),\n use_explicit_padding=self._use_explicit_padding,\n scope=base_scope)\n return net, end_points\n\n def create_lstm_cell(self, batch_size, output_size, state_saver, state_name):\n \"\"\"Create the LSTM cell, and initialize state if necessary.\n\n Args:\n batch_size: input batch size.\n output_size: output size of the lstm cell, [width, height].\n state_saver: a state saver object with methods `state` and `save_state`.\n state_name: string, the name to use with the 
state_saver.\n Returns:\n lstm_cell: the lstm cell unit.\n init_state: initial state representations.\n step: the step\n \"\"\"\n lstm_cell = lstm_cells.GroupedConvLSTMCell(\n filter_size=(3, 3),\n output_size=output_size,\n num_units=max(self._min_depth, self._lstm_state_depth),\n is_training=self._is_training,\n activation=tf.nn.relu6,\n flatten_state=self._flatten_state,\n scale_state=self._scale_state,\n clip_state=self._clip_state,\n output_bottleneck=True,\n pre_bottleneck=self._pre_bottleneck,\n is_quantized=self._is_quantized,\n visualize_gates=False)\n\n if state_saver is None:\n init_state = lstm_cell.init_state('lstm_state', batch_size, tf.float32)\n step = None\n else:\n step = state_saver.state(state_name + '_step')\n c = state_saver.state(state_name + '_c')\n h = state_saver.state(state_name + '_h')\n c.set_shape([batch_size] + c.get_shape().as_list()[1:])\n h.set_shape([batch_size] + h.get_shape().as_list()[1:])\n init_state = (c, h)\n return lstm_cell, init_state, step\n\n def extract_features(self, preprocessed_inputs, state_saver=None,\n state_name='lstm_state', unroll_length=10, scope=None):\n \"\"\"Extract features from preprocessed inputs.\n\n The features include the base network features, lstm features and SSD\n features, organized in the following name scope:\n\n <scope>/MobilenetV2_1/...\n <scope>/MobilenetV2_2/...\n <scope>/LSTM/...\n <scope>/FeatureMap/...\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of consecutive frames from video clips.\n state_saver: A state saver object with methods `state` and `save_state`.\n state_name: Python string, the name to use with the state_saver.\n unroll_length: number of steps to unroll the lstm.\n scope: Scope for the base network of the feature extractor.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n Raises:\n ValueError: if interleave_method not recognized or large and small base\n network output feature maps of different sizes.\n \"\"\"\n preprocessed_inputs = shape_utils.check_min_image_dim(\n 33, preprocessed_inputs)\n preprocessed_inputs = ops.pad_to_multiple(\n preprocessed_inputs, self._pad_to_multiple)\n batch_size = preprocessed_inputs.shape[0].value / unroll_length\n batch_axis = 0\n nets = []\n\n # Batch processing of mobilenet features.\n with slim.arg_scope(mobilenet_v2.training_scope(\n is_training=self._is_training,\n bn_decay=0.9997)), \\\n slim.arg_scope([mobilenet.depth_multiplier],\n min_depth=self._min_depth, divisible_by=8):\n # Big model.\n net, _ = self.extract_base_features_large(preprocessed_inputs)\n nets.append(net)\n large_base_feature_shape = net.shape\n\n # Small models\n net, _ = self.extract_base_features_small(preprocessed_inputs)\n nets.append(net)\n small_base_feature_shape = net.shape\n if not (large_base_feature_shape[1] == small_base_feature_shape[1] and\n large_base_feature_shape[2] == small_base_feature_shape[2]):\n raise ValueError('Large and Small base network feature map dimension '\n 'not equal!')\n\n with slim.arg_scope(self._conv_hyperparams_fn()):\n with tf.variable_scope('LSTM', reuse=self._reuse_weights):\n output_size = (large_base_feature_shape[1], large_base_feature_shape[2])\n lstm_cell, init_state, step = self.create_lstm_cell(\n batch_size, output_size, state_saver, state_name)\n\n nets_seq = [\n tf.split(net, unroll_length, axis=batch_axis) for net in nets\n ]\n\n net_seq, states_out = rnn_decoder.multi_input_rnn_decoder(\n nets_seq,\n 
init_state,\n lstm_cell,\n step,\n selection_strategy=self._interleave_method,\n is_training=self._is_training,\n is_quantized=self._is_quantized,\n pre_bottleneck=self._pre_bottleneck,\n flatten_state=self._flatten_state,\n scope=None)\n self._states_out = states_out\n\n batcher_ops = None\n if state_saver is not None:\n self._step = state_saver.state(state_name + '_step')\n batcher_ops = [\n state_saver.save_state(state_name + '_c', states_out[-1][0]),\n state_saver.save_state(state_name + '_h', states_out[-1][1]),\n state_saver.save_state(state_name + '_step', self._step + 1)]\n image_features = {}\n with tf_ops.control_dependencies(batcher_ops):\n image_features['layer_19'] = tf.concat(net_seq, 0)\n\n # SSD layers.\n with tf.variable_scope('FeatureMap'):\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=self._feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features,\n pool_residual=True)\n return feature_maps.values()\n" ]
[ [ "tensorflow.keras.optimizers.schedules.PolynomialDecay", "tensorflow.constant", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.clip_by_global_norm", "tensorflow.no_op", "tensorflow.name_scope", "tensorflow.math.pow" ], [ "tensorflow.train.Example", "numpy.random.randint" ], [ "tensorflow.io.gfile.GFile", "tensorflow.test.main" ], [ "tensorflow.convert_to_tensor", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.train.latest_checkpoint", "tensorflow.range", "tensorflow.io.gfile.exists", "tensorflow.train.Checkpoint", "tensorflow.compat.v2.keras.mixed_precision.experimental.Policy", "tensorflow.compat.v2.summary.create_file_writer", "tensorflow.io.gfile.makedirs", "tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy", "tensorflow.compat.v2.summary.create_noop_writer", "tensorflow.keras.optimizers.Adam", "tensorflow.name_scope", "tensorflow.train.experimental.enable_mixed_precision_graph_rewrite", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ], [ "tensorflow.feature_column.input_layer", "tensorflow.feature_column.embedding_column", "tensorflow.losses.mean_squared_error", "tensorflow.feature_column.categorical_column_with_vocabulary_list", "tensorflow.estimator.Estimator", "tensorflow.shape", "tensorflow.metrics.root_mean_squared_error", "tensorflow.cast", "tensorflow.feature_column.categorical_column_with_hash_bucket", "tensorflow.squeeze", "tensorflow.layers.dense", "tensorflow.train.get_global_step", "tensorflow.feature_column.numeric_column", "tensorflow.logging.set_verbosity", "tensorflow.estimator.EstimatorSpec", "tensorflow.to_float", "tensorflow.feature_column.indicator_column", "tensorflow.app.run" ], [ "tensorflow.not_equal", "tensorflow.matmul", "tensorflow.shape", "tensorflow.reshape", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.gather", "tensorflow.name_scope", "tensorflow.variable_scope", "tensorflow.random_normal_initializer" ], [ "tensorflow.convert_to_tensor", "tensorflow.layers.flatten", "tensorflow.sign", "tensorflow.squeeze", "tensorflow.nn.softsign", "tensorflow.contrib.layers.conv2d", "tensorflow.variable_scope", "tensorflow.random_uniform" ], [ "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.device", "tensorflow.compat.v2.keras.mixed_precision.experimental.Policy", "tensorflow.test.is_built_with_cuda", "tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy", "tensorflow.keras.backend.set_image_data_format", "tensorflow.keras.backend.set_learning_phase", "tensorflow.train.experimental.enable_mixed_precision_graph_rewrite" ], [ "tensorflow.Graph", "numpy.abs", "tensorflow.zeros", "tensorflow.test.main", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.random.rand", "tensorflow.Session", "tensorflow.variable_scope", "tensorflow.random_uniform", "tensorflow.contrib.training.SequenceQueueingStateSaver" ], [ "tensorflow.variable_scope", "tensorflow.squeeze", "tensorflow.truncated_normal_initializer", "tensorflow.reduce_mean" ], [ "tensorflow.global_variables_initializer", "numpy.zeros", "tensorflow.test.main", "tensorflow.zeros" ], [ "tensorflow.concat", "tensorflow.split", "tensorflow.image.resize_images", "tensorflow.identity", "tensorflow.name_scope", "tensorflow.variable_scope", "tensorflow.python.framework.ops.control_dependencies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ffcccc/MachineLearning
[ "78bc9c5df08b14f5d70ad5d6774c74f85a585c7e", "78bc9c5df08b14f5d70ad5d6774c74f85a585c7e" ]
[ "KNN.py", "examples/SVM_TEST.py" ]
[ "\"\"\"\n@Filename: KNN.py\n@Author: Danc1elion\n@Author: ffcccc\n@Create Date: 2019-04-29\n@Update Date: 2019-05-03\n@Description: Implement of KNN\n\"\"\"\n\nimport numpy as np\nimport operator as op\nimport AClassifier\nimport preProcess\n\nclass KNNClassifier(AClassifier.aClassifier):\n def __init__(self, k, norm_type=\"Normalization\"):\n self.k = k\n self.norm_type = \"Normalization\"\n self.x_train = None\n self.y_train = None\n\n '''\n Function: Normalization\n Description: Normalize input data. For vector x, the normalization process is given by\n normalization(x) = (x - min(x))/(max(x) - min(x))\n Input: data dataType: ndarray description: input data\n Output: norm_data dataType: ndarray description: output data after normalization\n '''\n # def Normalization(self, data):\n # # get the max and min value of each column\n # min_value = data.min(axis=0)\n # max_value = data.max(axis=0)\n # diff = max_value - min_value\n # # normalization\n # min_data = np.tile(min_value, (data.shape[0], 1))\n # norm_data = (data - min_data)/np.tile(diff, (data.shape[0], 1))\n # return norm_data\n\n '''\n Function: Standardization\n Description: Standardize input data. For vector x, the normalization process is given by\n Standardization(x) = x - mean(x)/std(x)\n Input: data dataType: ndarray description: input data\n Output: standard_data dataType: ndarray description: output data after standardization\n '''\n # def Standardization(self, data):\n # # get the mean and the variance of each column\n # mean_value = data.mean(axis=0)\n # var_value = data.std(axis=0)\n # standard_data = (data - np.tile(mean_value, (data.shape[0], 1)))/np.tile(var_value, (data.shape[0], 1))\n # return standard_data\n\n '''\n Function: train\n Description: train the model\n Input: train_data dataType: ndarray description: features\n test_data dataType: ndarray description: labels\n Output: self dataType: obj description: \n '''\n def train(self, train_data, train_label):\n if self.norm_type == \"Standardization\":\n train_data = preProcess.Standardization(train_data)\n else:\n train_data = preProcess.Normalization(train_data)\n self.x_train = train_data\n self.y_train = train_label\n return self\n\n '''\n Function: predict\n Description: give the prediction for test data\n Input: test_data dataType: ndarray description: data for testing\n test_abel dataType: ndarray description: labels of train data\n norm_type dataType: string description: type of normalization, default:Normalization\n probability dataType: bool description: if true return label and probability, else return label only\n showResult dataType: bool description: display the prediction result\n Output: results dataType: ndarray description: label or probability\n '''\n def predict(self, test_data):\n # Normalization\n if self.norm_type == \"Standardization\":\n testData = preProcess.Standardization(test_data)\n else:\n testData = preProcess.Normalization(test_data)\n\n test_num = testData.shape[0]\n prediction = np.zeros([test_num, 1])\n probability = np.zeros([test_num, 1])\n # predict each samples in test data\n for i in range(test_num):\n prediction[i], probability[i] = self.calculateDistance(testData[i], self.x_train, self.y_train, self.k)\n\n self.prediction = prediction\n self.probability = probability\n\n return prediction\n\n '''\n Function: calculateDistance\n Description: calcuate the distance between input vector and train data\n Input: input dataType: ndarray description: input vector\n traind_ata dataType: ndarray description: data for training\n 
train_label dataType: ndarray description: labels of train data\n k dataType: int description: select the first k distances\n Output: prob dataType: float description: max probability of prediction \n label dataType: int description: prediction label of input vector\n '''\n def calculateDistance(self, input, train_data, train_label, k):\n train_num = train_data.shape[0]\n # calcuate the distances\n distances = np.tile(input, (train_num, 1)) - train_data\n distances = distances**2\n distances = distances.sum(axis=1)\n distances = distances**0.5\n\n # get the labels of the first k distances\n disIndex = distances.argsort()\n labelCount = {}\n for i in range(k):\n label = train_label[disIndex[i]]\n labelCount[label] = labelCount.get(label, 0) + 1\n\n prediction = sorted(labelCount.items(), key=op.itemgetter(1), reverse=True)\n label = prediction[0][0]\n prob = prediction[0][1]/k\n return label, prob\n\n '''\n Function: showDetectionResult\n Description: show detection result\n Input: test_data dataType: ndarray description: data for test\n test_label dataType: ndarray description: labels of test data\n Output: accuracy dataType: float description: detection accuarcy\n '''\n # def showDetectionResult(self, test_data, test_label):\n # test_label = np.expand_dims(test_label, axis=1)\n # prediction = self.predict(test_data)\n # accuarcy = sum(prediction == test_label)/len(test_label)\n # return accuarcy\n", "import time,sys,os\r\n# LIB is the parent directory of the directory where program resides.\r\nLIB = os.path.join(os.path.dirname(__file__), '..')\r\nDAT = os.path.join(os.path.dirname(__file__), '..', 'dataset', 'dataset2')\r\nsys.path.insert(0, LIB)\r\nfrom sklearn.svm import SVC\r\nfrom SVM import *\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ntrainData = np.array(pd.read_table(os.path.join(DAT,'train.txt'), header=None, encoding='gb2312', delim_whitespace=True))\r\ntestData = np.array(pd.read_table(os.path.join(DAT,'test.txt'), header=None, encoding='gb2312', delim_whitespace=True))\r\ntrainLabel = trainData[:, -1]\r\ntrainData = np.delete(trainData, -1, axis=1)\r\ntestLabel = testData[:, -1]\r\ntestData = np.delete(testData, -1, axis=1)\r\n\r\ntime_start1 = time.time()\r\nclf1 = SVMClassifier()\r\nclf1.train(trainData, trainLabel)\r\nclf1.predict(testData)\r\nscore1 = clf1.accuracy(testLabel)\r\ntime_end1 = time.time()\r\nprint(\"Accuracy of self-SVM: %f\" % score1)\r\nprint(\"Runtime of self-SVM:\", time_end1-time_start1)\r\n\r\ntime_start = time.time()\r\nclf = SVC()\r\nclf.fit(trainData, trainLabel)\r\nclf.predict(testData)\r\nscore = clf.score(testData, testLabel, sample_weight=None)\r\ntime_end = time.time()\r\nprint(\"Accuracy of SVM: %f\" % score)\r\nprint(\"Runtime of SVM:\", time_end-time_start)\r\n" ]
[ [ "numpy.zeros", "numpy.tile" ], [ "numpy.delete", "sklearn.svm.SVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EricLi404/tensorflow
[ "23759800d89f7b5362c338d9a3fd72a6810c3e22", "23759800d89f7b5362c338d9a3fd72a6810c3e22", "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "23759800d89f7b5362c338d9a3fd72a6810c3e22", "23759800d89f7b5362c338d9a3fd72a6810c3e22", "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "23759800d89f7b5362c338d9a3fd72a6810c3e22", "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "23759800d89f7b5362c338d9a3fd72a6810c3e22" ]
[ "tensorflow/python/kernel_tests/conv_ops_test.py", "tensorflow/python/distribute/tpu_values.py", "tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py", "tensorflow/python/keras/utils/layer_utils.py", "tensorflow/python/distribute/mirrored_strategy.py", "tensorflow/lite/testing/zip_test_utils.py", "tensorflow/python/keras/distribute/keras_save_load_test.py", "tensorflow/python/keras/metrics_functional_test.py", "tensorflow/python/ops/linalg/linear_operator.py", "tensorflow/python/keras/utils/losses_utils.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for convolutional operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nimport numpy as np\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.layers import convolutional\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_impl\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.util.compat import collections_abc\n\n\ndef GetShrunkInceptionShapes(shrink=10):\n \"\"\"Iterator for smaller versions of convolution shapes in 2015 Inception.\n\n Relative to inception, each depth value is `depth // shrink`.\n\n Args:\n shrink: Factor to shrink each depth value by relative to Inception.\n\n Yields:\n Tuple (input_size, filter_size, out_size, stride, padding), the convolution\n parameters of Inception layers.\n \"\"\"\n input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],\n [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],\n [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],\n [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],\n [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],\n [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],\n [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],\n [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],\n [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],\n [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],\n [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],\n [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],\n [4, 35, 35, 192], [4, 73, 73, 64], 
[4, 73, 73, 64],\n [4, 147, 147, 24]]\n filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],\n [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],\n [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],\n [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],\n [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],\n [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],\n [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],\n [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],\n [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],\n [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],\n [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],\n [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],\n [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],\n [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],\n [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],\n [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],\n [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],\n [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],\n [1, 1, 24, 64]]\n out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],\n [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],\n [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],\n [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],\n [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],\n [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],\n [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],\n [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],\n [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],\n [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],\n [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],\n [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],\n [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],\n [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],\n [4, 147, 147, 64]]\n strides = [\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1\n ]\n # Shrink sizes to make the test faster\n for i in input_sizes:\n i[3] //= shrink\n for f in filter_sizes:\n f[2] //= shrink\n f[3] //= shrink\n for o in out_sizes:\n o[3] //= shrink\n # pylint: disable=invalid-name\n VALID = \"VALID\"\n SAME = \"SAME\"\n # pylint: enable=invalid-name\n paddings = [\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,\n SAME, SAME, SAME, SAME, VALID, VALID, VALID\n ]\n for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,\n paddings):\n yield i, f, o, s, p\n\n\ndef GetTestConfigs():\n \"\"\"Get all the valid tests configs to run.\n\n Returns:\n all the valid test configs as tuples of data_format and use_gpu.\n \"\"\"\n test_configs = [(\"NHWC\", False), (\"NHWC\", True)]\n if test.is_gpu_available(cuda_only=True):\n # \"NCHW\" format is only supported on CUDA.\n test_configs += [(\"NCHW\", True)]\n return test_configs\n\n\nclass Conv2DTest(test.TestCase):\n\n def _DtypesToTest(self, use_gpu):\n # double datatype is currently not supported for convolution ops\n # on the 
ROCm platform\n optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]\n if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():\n return [dtypes.float32] + optional_float64\n else:\n # It is important that float32 comes before float16 here,\n # as we will be using its gradients as reference for fp16 gradients.\n return [dtypes.float32, dtypes.float16] + optional_float64\n\n def _CreateNumpyTensor(self, shape):\n total_size = 1\n for s in shape:\n total_size *= s\n return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)\n\n def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,\n strides, padding, data_format, dtype, use_gpu):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n dilations: Dilated rate: [col_dilation, row_dilation]\n strides: Stride: [col_stride, row_stride]\n padding: Padding type.\n data_format: Format of the data tensors.\n dtype: Data type for inputs and outputs.\n use_gpu: True if the operations should be run on GPU\n Returns:\n Symbolic tensor value that can be used to execute the computation\n \"\"\"\n x1 = self._CreateNumpyTensor(tensor_in_sizes)\n x2 = self._CreateNumpyTensor(filter_in_sizes)\n\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)\n t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if isinstance(padding, (list, tuple)):\n padding = [(0, 0)] + padding + [(0, 0)]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n if isinstance(padding, (list, tuple)):\n padding = test_util.NHWCToNCHW(padding)\n conv = nn_ops.conv2d(\n t1,\n t2,\n dilations=dilations,\n strides=strides,\n padding=padding,\n data_format=data_format)\n self.assertEqual(conv.dtype, dtype)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n\n return conv\n\n def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,\n padding):\n \"\"\"Verifies that CPU and GPU produce the same values.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n conv_strides: [row_stride, col_stride] for the convolution;\n padding: Padding type.\n \"\"\"\n x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n def _SetupVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d(\n t1, t2, strides=strides, padding=padding, data_format=data_format)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n return conv\n\n tensors = []\n for (data_format, use_gpu) in GetTestConfigs():\n tensors.append(_SetupVal(data_format, use_gpu))\n values = self.evaluate(tensors)\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)\n\n def 
_ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,\n stride, dilation, padding, data_format,\n use_gpu):\n x1 = self._CreateNumpyTensor(tensor_in_sizes)\n x2 = self._CreateNumpyTensor(filter_in_sizes)\n with test_util.device(use_gpu):\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n if isinstance(stride, collections_abc.Iterable):\n strides = list(stride)\n else:\n strides = [stride, stride]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n full_strides = [1, 1] + strides\n full_dilation = [1, 1] + dilation\n else:\n full_strides = [1] + strides + [1]\n full_dilation = [1] + dilation + [1]\n expected = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilation,\n data_format=data_format)\n computed = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilation,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n expected = test_util.NCHWToNHWC(expected)\n computed = test_util.NCHWToNHWC(computed)\n return expected, computed\n\n def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,\n padding, dilations, rtol=1e-4):\n expected_results = []\n computed_results = []\n for data_format, use_gpu in GetTestConfigs():\n expected, computed = self._ComputeReferenceDilatedConv(\n tensor_in_sizes, filter_in_sizes, strides, dilations, padding,\n data_format, use_gpu)\n expected_results.append(expected)\n computed_results.append(computed)\n tolerance = 1e-2 if use_gpu else 1e-5\n expected_values = self.evaluate(expected_results)\n computed_values = self.evaluate(computed_results)\n for e_value, c_value in zip(expected_values, computed_values):\n tf_logging.debug(\"expected = %s\", e_value)\n tf_logging.debug(\"actual = %s\", c_value)\n self.assertAllClose(\n e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)\n\n def _VerifyValues(self,\n tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n expected,\n dilations=(1, 1),\n gpu_only=False,\n test_grappler_layout_optimizer=False,\n tol=1e-5,\n fp16_tol=1e-3):\n if gpu_only and not test.is_gpu_available(cuda_only=True):\n return\n tensors = []\n dilations = list(dilations)\n for (data_format, use_gpu) in GetTestConfigs():\n if gpu_only and not use_gpu:\n continue\n dtypes_to_test = self._DtypesToTest(use_gpu)\n if not test_grappler_layout_optimizer and data_format == \"NHWC\":\n dtypes_to_test.append(dtypes.int32)\n for dtype in dtypes_to_test:\n result = self._SetupValuesForDevice(\n tensor_in_sizes,\n filter_in_sizes,\n dilations,\n strides,\n padding,\n data_format,\n dtype,\n use_gpu=use_gpu)\n if test_grappler_layout_optimizer and data_format == \"NHWC\" and use_gpu:\n # Grappler's layout optimizer will not optimize a fetch node, so\n # this identity allows Grappler to optimize the Conv2D node.\n result = array_ops.identity(result)\n tensors.append(result)\n values = self.evaluate(tensors)\n for i in range(len(tensors)):\n conv = tensors[i]\n value = values[i]\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n tol_to_use = fp16_tol if value.dtype == np.float16 else tol\n if np.issubdtype(value.dtype, np.integer):\n self.assertAllEqual(np.rint(expected), np.ravel(value))\n else:\n self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,\n rtol=tol_to_use)\n self.assertShapeEqual(value, conv)\n self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)\n\n def _VerifyExplicitPaddings(self,\n 
tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n dilations=(1, 1),\n test_grappler_layout_optimizer=False,\n tol=1e-5,\n fp16_tol=1e-3):\n \"\"\"Verifies Conv2D with explicit padding generates correct values.\n\n It does this by comparing with Conv2D without explicit padding. This\n function assumes Conv2D without explicit padding works correctly.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in [batch, input_rows,\n input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\n input_depth, output_depth].\n strides: [row_stride, col_stride] for the convolution;\n padding: Explicit padding amounts.\n dilations: Dilation values\n test_grappler_layout_optimizer: If True, allow the Grappler layout\n optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.\n tol: The absolute and relative tolerance for non-fp16 dtypes.\n fp16_tol: The absolute and relative tolerance for fp16.\n \"\"\"\n input_tensor = self._CreateNumpyTensor(tensor_in_sizes)\n filter_tensor = self._CreateNumpyTensor(filter_in_sizes)\n input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])\n dilations = list(dilations)\n conv2d_result = nn_ops.conv2d(\n input_tensor,\n filter_tensor, [1] + list(strides) + [1],\n \"VALID\",\n dilations=[1] + dilations + [1])\n expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))\n self._VerifyValues(\n tensor_in_sizes,\n filter_in_sizes,\n strides,\n padding,\n expected,\n dilations,\n test_grappler_layout_optimizer=test_grappler_layout_optimizer,\n tol=tol,\n fp16_tol=fp16_tol)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x1Filter(self):\n expected_output = [\n 30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,\n 204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.conv2d(\n x1,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n conv2 = nn_ops.conv2d(\n x2,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionClass2DExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n convolver1 = nn_ops.Convolution(\n input_shape=x1.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver1.num_batch_dims, 1)\n convolver2 = nn_ops.Convolution(\n input_shape=x2.shape,\n filter_shape=filter_in.shape,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(convolver2.num_batch_dims, 2)\n conv1 = convolver1(x1, filter_in)\n conv2 = convolver2(x2, filter_in)\n self.assertEqual(conv1.shape, 
tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):\n tensor_in_sizes_batch = [10, 2, 3, 3]\n tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]\n filter_in_sizes = [1, 1, 3, 3]\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)\n x2 = x1.reshape(tensor_in_sizes_expanded_batch)\n conv1 = nn_ops.convolution(\n x1,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n conv2 = nn_ops.convolution(\n x2,\n filter_in,\n strides=[1, 1],\n padding=\"VALID\")\n self.assertEqual(conv1.shape, tensor_in_sizes_batch)\n self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)\n self.assertAllEqual(\n conv1,\n self.evaluate(conv2).reshape(conv1.shape))\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Filter2x1Dilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmpty(self):\n expected_output = []\n self._VerifyValues(\n tensor_in_sizes=[0, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[0, 2, 3, 3],\n filter_in_sizes=[1, 1, 3, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Filter(self):\n # The outputs are computed using third_party/py/IPython/notebook.\n expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x2Filter(self):\n # The outputs are computed using third_party/py/IPython/notebook.\n expected_output = [\n 231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,\n 936.0, 1029.0\n ]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 2, 3, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D1x2FilterDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[1, 2, 3, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride2(self):\n expected_output = [2271.0, 2367.0, 2463.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[2, 2],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride2Same(self):\n expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output)\n\n 
@test_util.run_in_graph_and_eager_modes\n def testConv2D2x2FilterStride1x2(self):\n expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 6, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSmallerThanStrideValid(self):\n expected_output = [65, 95, 275, 305]\n self._VerifyValues(\n tensor_in_sizes=[1, 7, 7, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[3, 3],\n padding=\"VALID\",\n expected=expected_output)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSmallerThanStrideSame(self):\n self._VerifyValues(\n tensor_in_sizes=[1, 3, 3, 1],\n filter_in_sizes=[1, 1, 1, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=[1, 3, 7, 9])\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[1, 1, 1, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=[1, 3, 9, 11])\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 1],\n filter_in_sizes=[2, 2, 1, 1],\n strides=[3, 3],\n padding=\"SAME\",\n expected=[44, 28, 41, 16])\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSize(self):\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 2, 1],\n filter_in_sizes=[2, 2, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=[50, 60])\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeDilation(self):\n self._VerifyDilatedConvValues(\n tensor_in_sizes=[1, 3, 3, 1],\n filter_in_sizes=[2, 2, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\")\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D0x0Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[3, 4, 3, 2],\n filter_in_sizes=[1, 1, 2, 1],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D1x1Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 2, 1],\n filter_in_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Padding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 2],\n filter_in_sizes=[2, 1, 2, 1],\n strides=[1, 1],\n padding=[[2, 2], [2, 2]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 2],\n filter_in_sizes=[1, 1, 2, 1],\n strides=[2, 1],\n padding=[[2, 2], [2, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DOnlyBottomPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 2],\n strides=[1, 1],\n padding=[[0, 3], [0, 0]], tol=2e-5)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[2, 2, 4, 3],\n filter_in_sizes=[1, 2, 3, 2],\n strides=[2, 2],\n padding=[[0, 3], [0, 0]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DOnlyTopRightPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 3],\n filter_in_sizes=[2, 2, 3, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 2]],\n tol=5e-5)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 4, 2],\n filter_in_sizes=[2, 2, 2, 2],\n strides=[1, 3],\n padding=[[1, 0], [0, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def 
testConv2DLotsPadding(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 1, 1, 3],\n filter_in_sizes=[2, 2, 3, 3],\n strides=[1, 1],\n padding=[[3, 4], [4, 2]])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 1, 1],\n filter_in_sizes=[2, 2, 1, 3],\n strides=[2, 1],\n padding=[[3, 4], [4, 2]])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2DExplicitPaddingWithDilations(self):\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 3, 2, 1],\n filter_in_sizes=[1, 2, 1, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 1]],\n dilations=[2, 1])\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[3, 2, 2, 1],\n strides=[1, 1],\n padding=[[2, 1], [1, 2]],\n dilations=[2, 3])\n\n def testConv2DExplicitPaddingWithLayoutOptimizer(self):\n # Test with Grappler's layout optimizer, to ensure the layout optimizer\n # handles explicit padding correctly.\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 3, 2, 1],\n filter_in_sizes=[1, 2, 1, 2],\n strides=[1, 1],\n padding=[[1, 0], [0, 1]],\n dilations=[2, 1],\n test_grappler_layout_optimizer=True)\n\n self._VerifyExplicitPaddings(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[3, 2, 2, 1],\n strides=[1, 1],\n padding=[[2, 1], [1, 2]],\n dilations=[2, 3],\n test_grappler_layout_optimizer=True)\n\n def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,\n strides, padding, data_format, dtype):\n \"\"\"Verify the output of group convolution is equal to a for-loop implementation.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in [batch, input_rows,\n input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\n input_depth, output_depth].\n dilations: Dilated rate: [col_dilation, row_dilation]\n strides: Stride: [col_stride, row_stride]\n padding: Padding type.\n data_format: Format of the data tensors.\n dtype: Data type for inputs and outputs.\n \"\"\"\n tensor_in = self._CreateNumpyTensor(tensor_in_sizes)\n filter_in = self._CreateNumpyTensor(filter_in_sizes)\n num_groups = tensor_in_sizes[3] // filter_in_sizes[2]\n assert num_groups > 1 and \\\n filter_in_sizes[2] * num_groups == tensor_in_sizes[3]\n with test_util.device(True):\n t1 = constant_op.constant(tensor_in, dtype=dtype)\n t2 = constant_op.constant(filter_in, dtype=dtype)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n t1 = test_util.NHWCToNCHW(t1)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n t1_splits = array_ops.split(t1, num_groups, axis=1)\n else:\n t1_splits = array_ops.split(t1, num_groups, axis=3)\n t2_splits = array_ops.split(t2, num_groups, axis=3)\n\n def MakeConv2d(inputs, filters):\n return nn_ops.conv2d(\n inputs,\n filters,\n strides,\n padding,\n dilations=dilations,\n data_format=data_format)\n\n group_conv = MakeConv2d(t1, t2)\n group_conv_loop = array_ops.concat(\n [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],\n axis=1 if data_format == \"NCHW\" else 3)\n\n results = self.evaluate([group_conv, group_conv_loop])\n tol_to_use = 1e-5\n self.assertAllClose(\n results[0], results[1], atol=tol_to_use, rtol=tol_to_use)\n\n @test_util.run_in_graph_and_eager_modes\n @test_util.run_cuda_only\n def testConv2DGroupConvFwd(self):\n for data_format in [\"NHWC\", \"NCHW\"]:\n for dilation in [1, 2]:\n for stride in [1, 2]:\n self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],\n dilations=[dilation, dilation],\n strides=[stride, stride],\n 
padding=\"SAME\",\n data_format=data_format,\n dtype=dtypes.float32)\n\n @test_util.deprecated_graph_mode_only\n @test_util.run_cuda_only\n def testInputGradientGroupConv(self):\n for data_format in [\"NCHW\", \"NHWC\"]:\n for test_input in [True, False]:\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n num_groups=2,\n padding=\"VALID\",\n in_depth=4,\n out_depth=6,\n stride_rows=1,\n stride_cols=1,\n test_input=test_input,\n data_format=data_format,\n use_gpu=True,\n max_err=0.005)\n\n @test_util.deprecated_graph_mode_only\n @test_util.run_cuda_only\n def testFilterGradientGroupConv(self):\n for data_format in [\"NCHW\", \"NHWC\"]:\n for test_input in [True, False]:\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n num_groups=2,\n padding=\"VALID\",\n in_depth=4,\n out_depth=6,\n stride_rows=1,\n stride_cols=1,\n test_input=test_input,\n data_format=data_format,\n use_gpu=True,\n max_err=0.005)\n # TODO(yzhwang): this currently fails.\n # self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],\n # filter_in_sizes=[2, 2, 1, 1],\n # strides=[4, 4], padding=\"SAME\",\n # expected=[72, 112, 392, 432])\n\n # Testing for backprops\n def _RunAndVerifyBackpropInput(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu,\n err,\n dilations=(1, 1)):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n x1 = self._CreateNumpyTensor(filter_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n with test_util.device(use_gpu):\n if len(input_sizes) == 4:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + strides + [1]\n dilations = [1] + dilations + [1]\n if isinstance(padding, (list, tuple)):\n padding = [(0, 0)] + padding + [(0, 0)]\n if data_format == \"NCHW\":\n t2 = test_util.NHWCToNCHW(t2)\n strides = test_util.NHWCToNCHW(strides)\n dilations = test_util.NHWCToNCHW(dilations)\n if isinstance(padding, (list, tuple)):\n padding = test_util.NHWCToNCHW((padding))\n conv = nn_ops.conv2d_backprop_input(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilations=dilations)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n # \"values\" consists of two tensors for two backprops\n value = self.evaluate(conv)\n self.assertShapeEqual(value, conv)\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)\n\n def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,\n conv_strides, padding):\n x1 = np.random.rand(*filter_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n if data_format == \"NCHW\":\n new_input_sizes = test_util.NHWCToNCHW(input_sizes)\n else:\n new_input_sizes = input_sizes\n t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])\n t1 = constant_op.constant(x1, shape=filter_sizes)\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t2 = test_util.NHWCToNCHW(t2)\n strides = 
test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d_backprop_input(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n ret = self.evaluate(conv)\n self.assertShapeEqual(ret, conv)\n return ret\n\n values = []\n for (data_format, use_gpu) in GetTestConfigs():\n values.append(_GetVal(data_format, use_gpu))\n\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth1ValidBackpropInput(self):\n expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyBackpropInput(self):\n expected_output = []\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropInput(self):\n expected_output = [\n 14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,\n 140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n # The GPU version of this test is not very stable. So adjusting the\n # error threshold to 1e-4.\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 3, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):\n expected_output = [\n 1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,\n 16.0, 15.0, 20.0, 18.0, 24.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 2, 3, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DStrideTwoFilterOneSameBackpropInput(self):\n expected_output = [\n 1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,\n 0.0, 0.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 4, 4, 1],\n filter_sizes=[1, 1, 1, 1],\n output_sizes=[1, 2, 2, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):\n expected_output = [5.0, 11.0, 17.0, 23.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[1, 2, 2, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.run_in_graph_and_eager_modes\n 
@test_util.disable_xla(\"XLA requires input_sizes to be a 4D shape.\")\n def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):\n expected_output = [5.0, 11.0, 17.0, 23.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInput(\n input_sizes=[2, 2],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n # Testing for backprops\n def _RunAndVerifyBackpropFilter(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=1e-5):\n x0 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n explicit_strides = [1] + strides + [1]\n new_padding = padding\n new_dilations = [1] + dilations + [1]\n if isinstance(new_padding, (list, tuple)):\n new_padding = [(0, 0)] + new_padding + [(0, 0)]\n if data_format == \"NCHW\":\n explicit_strides = test_util.NHWCToNCHW(explicit_strides)\n new_dilations = test_util.NHWCToNCHW(new_dilations)\n if isinstance(padding, (list, tuple)):\n new_padding = test_util.NHWCToNCHW(new_padding)\n for dtype in self._DtypesToTest(use_gpu=use_gpu):\n with test_util.device(use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)\n if data_format == \"NCHW\":\n t0 = test_util.NHWCToNCHW(t0)\n t2 = test_util.NHWCToNCHW(t2)\n conv = nn_ops.conv2d_backprop_filter(\n t0,\n t1,\n t2,\n strides=explicit_strides,\n padding=new_padding,\n dilations=new_dilations,\n data_format=data_format)\n value = self.evaluate(conv)\n self.assertShapeEqual(value, conv)\n tf_logging.debug(\"expected = %s\", expected)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(expected, value.flatten(), err)\n\n def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,\n conv_strides, padding):\n x0 = np.random.rand(*input_sizes).astype(np.float32)\n x2 = np.random.rand(*output_sizes).astype(np.float32)\n\n def _GetVal(data_format, use_gpu):\n with test_util.device(use_gpu):\n t0 = constant_op.constant(x0, shape=input_sizes)\n t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])\n t2 = constant_op.constant(x2, shape=output_sizes)\n strides = [1] + conv_strides + [1]\n if data_format == \"NCHW\":\n t0 = test_util.NHWCToNCHW(t0)\n t2 = test_util.NHWCToNCHW(t2)\n strides = test_util.NHWCToNCHW(strides)\n conv = nn_ops.conv2d_backprop_filter(\n t0,\n t1,\n t2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n ret = self.evaluate(conv)\n self.assertShapeEqual(ret, conv)\n return ret\n\n values = []\n for (data_format, use_gpu) in GetTestConfigs():\n values.append(_GetVal(data_format, use_gpu))\n for i in range(1, len(values)):\n self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth1ValidBackpropFilter(self):\n expected = [5.0, 8.0, 14.0, 17.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DEmptyBackpropFilter(self):\n expected = []\n 
for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 0],\n output_sizes=[1, 1, 2, 0],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DBackpropFilterWithEmptyInput(self):\n expected = [0, 0, 0, 0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropFilter(self):\n expected = [\n 17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,\n 37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,\n 117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,\n 120.0, 153.0\n ]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 3, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):\n expected = [161.0, 182.0, 287.0, 308.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 2, 3, 1],\n strides=[1, 2],\n padding=\"VALID\",\n expected=expected,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DStrideTwoFilterOneSameBackpropFilter(self):\n expected_output = [78.]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 4, 4, 1],\n filter_sizes=[1, 1, 1, 1],\n output_sizes=[1, 2, 2, 1],\n strides=[2, 2],\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes\n def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):\n expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilter(\n input_sizes=[1, 2, 2, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n padding=\"VALID\",\n expected=expected_output,\n data_format=data_format,\n use_gpu=use_gpu)\n\n # Testing for backprops\n def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,\n output_sizes, strides, dilations,\n padding, data_format, use_gpu, err):\n x1 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(filter_sizes)\n default_dilations = (dilations[0] == 1 and dilations[1] == 1)\n if default_dilations or use_gpu:\n with self.cached_session(use_gpu=use_gpu) as sess:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t1 = constant_op.constant(x1, shape=input_sizes)\n t2 = constant_op.constant(x2, shape=filter_sizes)\n full_strides = [1] + strides + [1]\n full_dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n full_strides = test_util.NHWCToNCHW(full_strides)\n full_dilations = test_util.NHWCToNCHW(full_dilations)\n conv_forward = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilations,\n padding=padding,\n data_format=data_format)\n 
conv_forward_2 = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv_forward = test_util.NCHWToNHWC(conv_forward)\n conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)\n conv = gradients_impl.gradients(conv_forward, t1)[0]\n conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]\n # \"values\" consists of two tensors for two backprops\n value = self.evaluate(conv)\n value_2 = self.evaluate(conv_2)\n self.assertShapeEqual(value, conv)\n self.assertShapeEqual(value_2, conv_2)\n tf_logging.debug(\"expected = %s\", value_2)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(value_2.flatten(), value.flatten(), err)\n\n # Testing for backprops\n def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,\n output_sizes, strides, dilations,\n padding, data_format, use_gpu, err):\n x1 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(filter_sizes)\n default_dilations = (dilations[0] == 1 and dilations[1] == 1)\n if default_dilations or use_gpu:\n with self.cached_session(use_gpu=use_gpu) as sess:\n if data_format == \"NCHW\":\n input_sizes = test_util.NHWCToNCHW(input_sizes)\n t1 = constant_op.constant(x1, shape=input_sizes)\n t2 = constant_op.constant(x2, shape=filter_sizes)\n full_strides = [1] + strides + [1]\n full_dilations = [1] + dilations + [1]\n if data_format == \"NCHW\":\n full_strides = test_util.NHWCToNCHW(full_strides)\n full_dilations = test_util.NHWCToNCHW(full_dilations)\n conv_forward = nn_ops.conv2d(\n t1,\n t2,\n strides=full_strides,\n dilations=full_dilations,\n padding=padding,\n data_format=data_format)\n conv_forward_2 = nn_ops.convolution(\n t1,\n t2,\n padding=padding,\n strides=strides,\n dilation_rate=dilations,\n data_format=data_format)\n if data_format == \"NCHW\":\n conv_forward = test_util.NCHWToNHWC(conv_forward)\n conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)\n conv = gradients_impl.gradients(conv_forward, t2)[0]\n conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]\n value = self.evaluate(conv)\n value_2 = self.evaluate(conv_2)\n self.assertShapeEqual(value, conv)\n self.assertShapeEqual(value_2, conv_2)\n tf_logging.debug(\"expected = %s\", value_2)\n tf_logging.debug(\"actual = %s\", value)\n self.assertArrayNear(value_2.flatten(), value.flatten(), err)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 5, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DEmptyBackpropFilterDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in 
GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 0],\n output_sizes=[1, 1, 2, 0],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 4, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterDilation(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 5, 1],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DEmptyBackpropInputDilation1x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[0, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[0, 1, 2, 1],\n strides=[1, 1],\n dilations=[1, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n @test_util.deprecated_graph_mode_only\n def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n # The GPU version of this test is not very stable. 
So adjusting the\n # error threshold to 1e-4.\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 2, 3],\n filter_sizes=[2, 2, 3, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[1, 1],\n dilations=[2, 1],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n @test_util.deprecated_graph_mode_only\n def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):\n if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputDilation(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 1, 1, 2],\n strides=[1, 1],\n dilations=[2, 2],\n padding=\"VALID\",\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-5)\n\n def _RunAndVerifyBackpropInputExplicitPadding(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=2e-5):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n if not use_gpu and dilations != (1, 1):\n return # Non-default dilations is currently not supported on the CPU.\n\n x1 = self._CreateNumpyTensor(filter_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n padded_input_sizes = input_sizes[:]\n padded_input_sizes[1] += padding[0][0] + padding[0][1]\n padded_input_sizes[2] += padding[1][0] + padding[1][1]\n c = nn_ops.conv2d_backprop_input(\n padded_input_sizes,\n x1,\n x2,\n strides=[1] + strides + [1],\n padding=\"VALID\",\n dilations=[1] + dilations + [1])\n c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(\n c.shape[2] - padding[1][1]), :]\n expected = list(self.evaluate(array_ops.reshape(c, [-1])))\n self._RunAndVerifyBackpropInput(\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu=use_gpu,\n err=err,\n dilations=dilations)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding0x0BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 4, 2],\n filter_sizes=[2, 2, 2, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding1x1BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 3, 4, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 2],\n filter_sizes=[1, 1, 2, 1],\n output_sizes=[1, 4, 3, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 4, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 4, 2, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n dilations=[2, 2], use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding2x2BackpropInput(self):\n for (data_format, use_gpu) in 
GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[2, 3, 1, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[2, 2, 5, 1],\n strides=[3, 1],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 3, 4, 1],\n strides=[1, 2],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n dilations=[2, 3],\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 10, 8, 1],\n strides=[1, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=5e-5)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 5, 3, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 4, 8, 1],\n strides=[3, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[1, 7, 7, 1],\n strides=[1, 1],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n err=5e-5,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropInputExplicitPadding(\n input_sizes=[1, 4, 2, 1],\n filter_sizes=[3, 3, 1, 1],\n output_sizes=[1, 5, 2, 1],\n strides=[1, 2],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n dilations=[2, 1],\n use_gpu=use_gpu)\n\n def _RunAndVerifyBackpropFilterExplicitPadding(self,\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n data_format,\n use_gpu,\n dilations=(1, 1),\n err=1e-5):\n if use_gpu and not test.is_gpu_available(cuda_only=True):\n return\n if not use_gpu and dilations != (1, 1):\n return # Non-default dilations is currently not supported on the CPU.\n\n x0 = self._CreateNumpyTensor(input_sizes)\n x2 = self._CreateNumpyTensor(output_sizes)\n dilations = list(dilations)\n\n x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], \"constant\")\n c = nn_ops.conv2d_backprop_filter(\n x0,\n filter_sizes,\n x2,\n strides=[1] + strides + [1],\n padding=\"VALID\",\n dilations=[1] + dilations + [1])\n expected = list(self.evaluate(array_ops.reshape(c, [-1])))\n self._RunAndVerifyBackpropFilter(\n input_sizes,\n filter_sizes,\n output_sizes,\n strides,\n padding,\n expected,\n data_format,\n use_gpu=use_gpu,\n dilations=dilations,\n err=err)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding0x0BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 1, 2, 1],\n strides=[1, 1],\n padding=[[0, 0], [0, 0]],\n data_format=data_format, use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 4, 2],\n filter_sizes=[2, 2, 2, 3],\n output_sizes=[1, 1, 2, 3],\n strides=[2, 2],\n padding=[[0, 0], [0, 0]],\n data_format=data_format, use_gpu=use_gpu)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding1x1BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n 
input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 2],\n output_sizes=[1, 3, 4, 2],\n strides=[1, 1],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=5e-5)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 2],\n filter_sizes=[1, 1, 2, 1],\n output_sizes=[1, 4, 3, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n use_gpu=use_gpu,\n data_format=data_format)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 4, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 4, 2, 1],\n strides=[1, 2],\n padding=[[1, 1], [1, 1]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 2])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding2x2BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[2, 3, 1, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[2, 2, 5, 1],\n strides=[3, 1],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 6, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 3, 4, 1],\n strides=[1, 2],\n padding=[[2, 2], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 3])\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 2, 3, 1],\n filter_sizes=[2, 2, 1, 1],\n output_sizes=[1, 10, 8, 1],\n strides=[1, 1],\n padding=[[1, 8], [4, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 5, 3, 1],\n filter_sizes=[3, 2, 1, 1],\n output_sizes=[1, 4, 8, 1],\n strides=[3, 1],\n padding=[[1, 8], [4, 2]],\n use_gpu=use_gpu,\n data_format=data_format)\n\n @test_util.run_in_graph_and_eager_modes()\n def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 3, 3, 1],\n filter_sizes=[2, 1, 1, 1],\n output_sizes=[1, 7, 7, 1],\n strides=[1, 1],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n err=1e-4)\n\n self._RunAndVerifyBackpropFilterExplicitPadding(\n input_sizes=[1, 4, 2, 1],\n filter_sizes=[3, 3, 1, 1],\n output_sizes=[1, 5, 2, 1],\n strides=[1, 2],\n padding=[[5, 0], [2, 2]],\n data_format=data_format,\n use_gpu=use_gpu,\n dilations=[2, 1])\n\n # Gradient checkers\n def ConstructAndTestGradient(self,\n batch,\n input_rows,\n input_cols,\n filter_rows,\n filter_cols,\n in_depth,\n out_depth,\n stride_rows,\n stride_cols,\n padding,\n test_input,\n data_format,\n use_gpu,\n num_groups=1,\n max_err=0.003):\n assert in_depth % num_groups == 0 and out_depth % num_groups == 0\n input_shape = [batch, input_rows, input_cols, in_depth]\n filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]\n # TODO(yangke): re-factor the computation of output shape.\n if padding == \"VALID\":\n output_rows = (input_rows - filter_rows + stride_rows) // stride_rows\n output_cols = (input_cols - filter_cols + stride_cols) // stride_cols\n elif padding == \"SAME\":\n output_rows = (input_rows + stride_rows - 1) // stride_rows\n output_cols = (input_cols + stride_cols - 1) // stride_cols\n else:\n self.assertIsInstance(padding, (list, tuple))\n output_rows = (input_rows + padding[1][0] + 
padding[1][1] - filter_rows +\n stride_rows) // stride_rows\n output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +\n stride_cols) // stride_cols\n output_shape = [batch, output_rows, output_cols, out_depth]\n input_size = 1\n for x in input_shape:\n input_size *= x\n filter_size = 1\n for x in filter_shape:\n filter_size *= x\n input_data = [x * 1.0 / input_size for x in range(0, input_size)]\n filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]\n # Conv2DGrad functions are not compiled for double due to\n # a problem in the way Eigen's Conv2DGrad works for double.\n # So we disable the DOUBLE path. We should re-enable this\n # when double support returns for CPU and/or GPU.\n for dtype in self._DtypesToTest(use_gpu=use_gpu):\n with self.cached_session(use_gpu=use_gpu):\n input_tensor = constant_op.constant(\n input_data, shape=input_shape, dtype=dtype, name=\"input\")\n filter_tensor = constant_op.constant(\n filter_data, shape=filter_shape, dtype=dtype, name=\"filter\")\n strides = [1, stride_rows, stride_cols, 1]\n new_padding = padding\n if data_format == \"NCHW\":\n new_input_tensor = test_util.NHWCToNCHW(input_tensor)\n strides = test_util.NHWCToNCHW(strides)\n if isinstance(padding, (list, tuple)):\n new_padding = test_util.NHWCToNCHW(padding)\n else:\n new_input_tensor = input_tensor\n conv = nn_ops.conv2d(\n new_input_tensor,\n filter_tensor,\n strides,\n new_padding,\n data_format=data_format,\n name=\"conv\")\n if data_format == \"NCHW\":\n conv = test_util.NCHWToNHWC(conv)\n self.assertEqual(output_shape, conv.get_shape())\n if test_input:\n jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,\n input_shape,\n conv,\n output_shape)\n else:\n jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,\n filter_shape,\n conv,\n output_shape)\n if dtype == dtypes.float32:\n reference_jacob_t = jacob_t\n err = np.fabs(jacob_t - jacob_n).max()\n else:\n # Compare fp16 theoretical gradients to fp32 theoretical gradients,\n # since fp16 numerical gradients are too imprecise.\n err = np.fabs(jacob_t - reference_jacob_t).max()\n\n tf_logging.debug(\"conv_2d gradient error = %s\", err)\n self.assertLess(err, max_err)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideTwo(self):\n for (data_format, use_gpu) in 
GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientValidPaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=4,\n out_depth=5,\n stride_rows=3,\n stride_cols=3,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientValidPaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=3,\n stride_cols=3,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=3,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=4,\n input_rows=6,\n input_cols=5,\n filter_rows=2,\n filter_cols=2,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientSamePaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=7,\n input_cols=6,\n filter_rows=3,\n filter_cols=3,\n in_depth=4,\n out_depth=5,\n stride_rows=3,\n stride_cols=3,\n padding=\"SAME\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientSamePaddingStrideThree(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=3,\n stride_cols=3,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def 
testFilterGradientSamePaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=7,\n filter_rows=4,\n filter_cols=4,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=1,\n padding=\"SAME\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradientKernelSizeMatchesInputSize(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=3,\n filter_rows=4,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradientKernelSizeMatchesInputSize(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=3,\n filter_rows=4,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=\"VALID\",\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1x1PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.0025)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1x1PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1x1PaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1x1PaddingStrideTwo(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=4,\n input_cols=5,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=2,\n stride_cols=2,\n padding=[[0, 0], [1, 1], [1, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient2x2PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.003)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient2x2PaddingStrideOne(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=5,\n input_cols=4,\n 
filter_rows=3,\n filter_cols=3,\n in_depth=2,\n out_depth=3,\n stride_rows=1,\n stride_cols=1,\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu,\n max_err=0.003)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient1_2_3_4PaddingStride3x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=5,\n filter_rows=4,\n filter_cols=2,\n in_depth=3,\n out_depth=2,\n stride_rows=3,\n stride_cols=2,\n padding=[[0, 0], [1, 2], [3, 4], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient1_2_3_4PaddingStride3x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=8,\n input_cols=5,\n filter_rows=4,\n filter_cols=2,\n in_depth=3,\n out_depth=2,\n stride_rows=3,\n stride_cols=2,\n padding=[[0, 0], [1, 2], [3, 4], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient4_3_2_1PaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=3,\n input_rows=5,\n input_cols=7,\n filter_rows=3,\n filter_cols=2,\n in_depth=1,\n out_depth=2,\n stride_rows=2,\n stride_cols=1,\n padding=[[0, 0], [4, 3], [2, 1], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient4_3_2_1PaddingStride2x1(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=3,\n input_rows=5,\n input_cols=7,\n filter_rows=3,\n filter_cols=2,\n in_depth=1,\n out_depth=2,\n stride_rows=2,\n stride_cols=1,\n padding=[[0, 0], [4, 3], [2, 1], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testInputGradient0_0_0_5PaddingStride1x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=6,\n input_cols=7,\n filter_rows=3,\n filter_cols=4,\n in_depth=3,\n out_depth=2,\n stride_rows=1,\n stride_cols=2,\n padding=[[0, 0], [0, 0], [0, 5], [0, 0]],\n test_input=True,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testFilterGradient0_0_0_5PaddingStride1x2(self):\n for (data_format, use_gpu) in GetTestConfigs():\n self.ConstructAndTestGradient(\n batch=2,\n input_rows=6,\n input_cols=7,\n filter_rows=3,\n filter_cols=4,\n in_depth=3,\n out_depth=2,\n stride_rows=1,\n stride_cols=2,\n padding=[[0, 0], [0, 0], [0, 5], [0, 0]],\n test_input=False,\n data_format=data_format,\n use_gpu=use_gpu)\n\n @test_util.deprecated_graph_mode_only\n def testShapeFunctionEdgeCases(self):\n # All shapes unknown.\n c1 = nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n self.assertEqual([None, None, None, None], c1.get_shape().as_list())\n\n # Incorrect input shape.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(\n dtypes.float32, shape=[1, 3]),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Incorrect filter shape.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(\n dtypes.float32, shape=[1, 3]),\n strides=[1, 1, 1, 
1],\n padding=\"SAME\")\n\n # Depth mismatch.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3]),\n array_ops.placeholder(\n dtypes.float32, shape=[4, 4, 2, 2]),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Input depth divisible by filter depth (group convolution).\n # No exceptions should appear.\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),\n array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),\n strides=[1, 1, 1, 1],\n padding=\"SAME\")\n\n # Negative padding.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, -1], [1, 2], [0, 0]])\n\n # Nonzero padding in nonspatial dimension.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[1, 0], [0, 0], [0, 0], [0, 0]])\n\n # Nonzero NCHW padding in nonspatial dimension.\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, 1], [0, 0], [0, 0]],\n data_format=\"NCHW\")\n\n # Wrong amount of padding\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [0, 0], [0, 0]])\n\n # Only specify one padding amount per dimension\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[[0], [0], [0], [0]])\n\n # Explicit padding elements are not lists\n with self.assertRaises(ValueError):\n nn_ops.conv2d(\n array_ops.placeholder(dtypes.float32),\n array_ops.placeholder(dtypes.float32),\n strides=[1, 1, 1, 1],\n padding=[0, 0, 0, 0])\n\n @test_util.deprecated_graph_mode_only\n def testOpEdgeCases(self):\n with self.cached_session() as sess:\n # Illegal strides.\n with self.assertRaisesRegex(errors_impl.UnimplementedError,\n \"strides in the batch and depth\"):\n input_placeholder = array_ops.placeholder(dtypes.float32)\n input_val = np.ones([10, 10])\n filter_placeholder = array_ops.placeholder(dtypes.float32)\n filter_val = np.ones([10, 10])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[2, 1, 1, 1],\n padding=\"SAME\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n with self.assertRaisesRegex(errors_impl.UnimplementedError,\n \"strides in the batch and depth\"):\n input_placeholder = array_ops.placeholder(dtypes.float32)\n filter_placeholder = array_ops.placeholder(dtypes.float32)\n input_val = np.ones([10, 10])\n filter_val = np.ones([10, 10])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 2],\n padding=\"SAME\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Filter larger than input.\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[20, 21, 3, 2])\n filter_val = np.ones([20, 21, 3, 2])\n\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n 
filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=\"VALID\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[21, 20, 3, 2])\n filter_val = np.ones([21, 20, 3, 2])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=\"VALID\"),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Filter larger than input + padding.\n with self.assertRaisesRegex(ValueError, \"Negative dimension size\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[24, 25, 3, 2])\n filter_val = np.ones([24, 25, 3, 2])\n sess.run(\n nn_ops.conv2d(\n input_placeholder,\n filter_placeholder,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),\n feed_dict={\n input_placeholder: input_val,\n filter_placeholder: filter_val\n })\n\n # Negative padding during backprop.\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n \"All elements of explicit_paddings must be nonnegative\"):\n filter_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[18, 18, 3, 2])\n filter_val = np.ones([18, 18, 3, 2])\n out_backprop = array_ops.placeholder(\n dtypes.float32, shape=[32, 3, 2, 2])\n out_backprop_val = np.ones([32, 3, 2, 2])\n sess.run(\n nn_ops.conv2d_backprop_input([32, 20, 20, 3],\n filter_placeholder,\n out_backprop,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [-1, 0], [0, 0],\n [0, 0]]),\n feed_dict={\n filter_placeholder: filter_val,\n out_backprop: out_backprop_val\n })\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n \"All elements of explicit_paddings must be nonnegative\"):\n input_placeholder = array_ops.placeholder(\n dtypes.float32, shape=[32, 20, 20, 3])\n input_val = np.ones([32, 20, 20, 3])\n out_backprop = array_ops.placeholder(\n dtypes.float32, shape=[32, 3, 2, 2])\n out_backprop_val = np.ones([32, 3, 2, 2])\n sess.run(\n nn_ops.conv2d_backprop_filter(\n input_placeholder, [18, 18, 3, 2],\n out_backprop,\n strides=[1, 1, 1, 1],\n padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),\n feed_dict={\n input_placeholder: input_val,\n out_backprop: out_backprop_val\n })\n\n\nclass DepthwiseConv2DTest(test.TestCase):\n\n def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,\n expected):\n \"\"\"Verifies the output values of the convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [filter_rows, filter_cols, input_depth, depth_multiplier].\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n \"\"\"\n total_size_1 = 1\n total_size_2 = 1\n for s in tensor_in_sizes:\n total_size_1 *= s\n for s in filter_in_sizes:\n total_size_2 *= s\n # Initializes the input tensor with array containing incrementing\n # numbers from 1.\n x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]\n x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]\n with self.cached_session() as sess:\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t1.set_shape(tensor_in_sizes)\n t2 
= constant_op.constant(x2, shape=filter_in_sizes)\n conv = nn_impl.depthwise_conv2d(\n t1, t2, strides=[1, stride, stride, 1], padding=padding)\n value = self.evaluate(conv)\n tf_logging.debug(\"value = %s\", value)\n self.assertArrayNear(expected, np.ravel(value), 1e-5)\n self.assertShapeEqual(value, conv)\n\n def testConv2D2x2Filter(self):\n # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):\n #\n # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]\n # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]\n # We can view this as two inputs\n #\n # input depth 0:\n #\n # [ 1.0, 3.0, 5.0 ]\n # [ 7.0, 9.0, 11.0 ]\n #\n # input depth 1:\n #\n # [ 2.0, 4.0, 6.0 ]\n # [ 8.0, 10.0, 12.0 ]\n #\n # The filter looks like this (it has two 2 x 2 patches, each generating 2\n # depths):\n #\n # filter #0:\n #\n # [ (1.0, 3.0), ( 5.0, 7.0)]\n # [ (9.0, 11.0), (13.0, 15.0)]\n #\n # filter #1:\n #\n # [ ( 2.0, 4.0), ( 6.0, 8.0)]\n # [ (10.0, 12.0), (14.0, 16.0)]\n #\n # So the outputs are:\n #\n # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196\n # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216\n # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272\n # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296\n #\n # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)\n # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252\n # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)\n # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280\n # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)\n # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344\n # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)\n # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376\n expected_output = [196, 216, 272, 296, 252, 280, 344, 376]\n self._VerifyValues(\n tensor_in_sizes=[1, 2, 3, 2],\n filter_in_sizes=[2, 2, 2, 2],\n stride=1,\n padding=\"VALID\",\n expected=expected_output)\n\n\nclass SeparableConv2DTest(test.TestCase):\n\n def _InitValues(self, sizes):\n \"\"\"Initializes values for input tensors.\n\n Args:\n sizes: Tensor dimensions.\n\n Returns:\n Tensor initialized to values.\n \"\"\"\n total_size = 1\n for s in sizes:\n total_size *= s\n x = [f * 0.5 for f in range(1, total_size + 1)]\n return constant_op.constant(x, shape=sizes)\n\n def _VerifyValues(self,\n tensor_in_sizes,\n depthwise_filter_in_sizes,\n pointwise_filter_in_sizes,\n stride,\n padding,\n expected,\n data_format=\"NHWC\"):\n \"\"\"Verifies the output values of the separable convolution function.\n\n Args:\n tensor_in_sizes: Input tensor dimensions.\n depthwise_filter_in_sizes: Depthwise filter tensor dimensions.\n pointwise_filter_in_sizes: Pointwise filter tensor dimensions.\n stride: Stride.\n padding: Padding type.\n expected: An array containing the expected operation outputs.\n data_format: string data format for input tensor.\n \"\"\"\n with self.cached_session(use_gpu=True) as sess:\n t1 = self._InitValues(tensor_in_sizes)\n f1 = self._InitValues(depthwise_filter_in_sizes)\n f1.set_shape(depthwise_filter_in_sizes)\n f2 = self._InitValues(pointwise_filter_in_sizes)\n\n real_t1 = t1\n strides = [1, stride, stride, 1]\n if data_format == \"NCHW\":\n real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])\n strides = [1, 1, 
stride, stride]\n if isinstance(padding, list):\n padding = [padding[0], padding[3], padding[1], padding[2]]\n\n conv = nn_impl.separable_conv2d(\n real_t1,\n f1,\n f2,\n strides=strides,\n padding=padding,\n data_format=data_format)\n\n if data_format == \"NCHW\":\n conv = array_ops.transpose(conv, [0, 2, 3, 1])\n\n value = self.evaluate(conv)\n tf_logging.debug(\"value = %s\", value)\n self.assertArrayNear(expected, np.ravel(value), 2e-3)\n self.assertShapeEqual(value, conv)\n\n def _testSeparableConv2D(self, data_format):\n # The output is the result of two convolutions:\n # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].\n # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].\n # Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).\n expected_output = [\n 6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,\n 8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,\n 11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,\n 4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,\n 15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,\n 18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,\n 6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,\n 19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,\n 22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,\n 24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,\n 10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,\n 7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,\n 7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,\n 2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75\n ]\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 2],\n depthwise_filter_in_sizes=[2, 2, 2, 3],\n pointwise_filter_in_sizes=[1, 1, 6, 7],\n stride=1,\n padding=\"SAME\",\n expected=expected_output,\n data_format=data_format)\n\n def testSeparableConv2D(self):\n self._testSeparableConv2D(\"NHWC\")\n\n def disabledtestSeparableConv2DNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2D(\"NCHW\")\n\n def _testSeparableConv2DEqualInputOutputDepth(self, data_format):\n # The output is the result of two convolutions:\n # First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].\n # Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].\n # Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).\n expected_output = [\n 5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,\n 8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,\n 10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,\n 11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,\n 14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,\n 17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,\n 17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,\n 20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,\n 24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,\n 5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,\n 6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,\n 1923.75, 2007.0, 2090.25, 2173.5\n ]\n\n self._VerifyValues(\n tensor_in_sizes=[1, 4, 4, 2],\n depthwise_filter_in_sizes=[2, 2, 2, 3],\n pointwise_filter_in_sizes=[1, 1, 6, 6],\n stride=1,\n 
padding=\"SAME\",\n expected=expected_output,\n data_format=data_format)\n\n @test_util.deprecated_graph_mode_only\n def testSeparableConv2DEqualInputOutputDepth(self):\n self._testSeparableConv2DEqualInputOutputDepth(\"NHWC\")\n\n def testSeparableConv2DEqualInputOutputDepthNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2DEqualInputOutputDepth(\"NCHW\")\n\n def _testSeparableConv2dExplicitPadding(self, data_format):\n tensor_in_sizes = [1, 4, 4, 2]\n depthwise_filter_in_sizes = [2, 2, 2, 3]\n pointwise_filter_in_sizes = [1, 1, 6, 7]\n padding = [[0, 0], [1, 2], [3, 4], [0, 0]]\n with self.cached_session(use_gpu=True):\n # Compute the 'expected' values by manually padding before calling\n # separable_conv2d\n t1 = self._InitValues(tensor_in_sizes)\n t1 = array_ops.pad(t1, padding)\n f1 = self._InitValues(depthwise_filter_in_sizes)\n f1.set_shape(depthwise_filter_in_sizes)\n f2 = self._InitValues(pointwise_filter_in_sizes)\n conv = nn_impl.separable_conv2d(\n t1,\n f1,\n f2,\n strides=[1, 1, 1, 1],\n padding=\"VALID\",\n data_format=\"NHWC\")\n expected = self.evaluate(conv)\n expected = np.ravel(expected)\n self._VerifyValues(\n tensor_in_sizes=tensor_in_sizes,\n depthwise_filter_in_sizes=depthwise_filter_in_sizes,\n pointwise_filter_in_sizes=pointwise_filter_in_sizes,\n stride=1,\n padding=padding,\n expected=expected,\n data_format=data_format)\n\n def testSeparableConv2dExplicitPadding(self):\n self._testSeparableConv2dExplicitPadding(\"NHWC\")\n\n def testSeparableConv2dExplicitPaddingNCHW(self):\n if not test.is_gpu_available():\n return\n self._testSeparableConv2dExplicitPadding(\"NCHW\")\n\n\nclass DeepConv2DTest(test.TestCase):\n\n def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,\n padding):\n \"\"\"Verifies that DeepConv2D and Conv2D produce the same values.\n\n Args:\n tensor_in_sizes: Input tensor dimensions in\n [batch, input_rows, input_cols, input_depth].\n filter_in_sizes: Filter tensor dimensions in\n [kernel_rows, kernel_cols, input_depth, output_depth].\n conv_strides: [row_stride, col_stride] for the convolution;\n padding: Padding type.\n \"\"\"\n x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)\n x2 = np.random.rand(*filter_in_sizes).astype(np.float32)\n\n with self.cached_session(use_gpu=False) as sess:\n t1 = constant_op.constant(x1, shape=tensor_in_sizes)\n t2 = constant_op.constant(x2, shape=filter_in_sizes)\n strides = [1] + conv_strides + [1]\n\n conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)\n\n os.environ[\"TF_USE_DEEP_CONV2D\"] = \"0\"\n values_expect = self.evaluate([conv])\n\n os.environ[\"TF_USE_DEEP_CONV2D\"] = \"1\"\n values_test = self.evaluate([conv])\n\n self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)\n\n def _RunTestCases(self, conv_strides, padding):\n input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],\n [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]\n filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],\n [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]\n for input_shape, filter_shape in zip(input_sizes, filter_sizes):\n self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)\n\n def testConv2D3x3FilterStride1x1Valid(self):\n self._RunTestCases([1, 1], \"VALID\")\n\n def testConv2D3x3FilterStride1x1Same(self):\n self._RunTestCases([1, 1], \"SAME\")\n\n\nclass Conv2DBenchmark(test.Benchmark):\n\n def benchmarkGPUConvStackFirst(self):\n # Benchmark the first iteration of a conv-net with many 
identical conv\n # operations.\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default(), session_lib.Session() as session:\n batch_size = 1\n timesteps = 600\n features = 1\n\n inputs = random_ops.random_uniform(\n [batch_size, 1, timesteps, features], seed=1234)\n num_outputs_list = [512] * 40 + [1]\n kernel_w = 3\n x = inputs\n for num_outputs in num_outputs_list:\n x = convolutional.conv2d(x, num_outputs, [1, kernel_w])\n outputs = x\n\n self.evaluate(variables.global_variables_initializer())\n num_iterations = 4\n for iter_index in xrange(num_iterations):\n start = time.time()\n session.run(outputs)\n wall_time = time.time() - start\n self.report_benchmark(\n name=\"conv_stack_iter_%d\" % iter_index, wall_time=wall_time)\n tf_logging.info(\"conv_stack_iter_%d: %.4f\" % (iter_index, wall_time))\n\n def _bench_op(self, name, op, burn_iters, num_iters):\n config = config_pb2.ConfigProto()\n # Prevent Grappler from optimizing away the entire graph.\n config.graph_options.rewrite_options.dependency_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n with session_lib.Session(config=config) as session:\n self.evaluate(variables.global_variables_initializer())\n self.run_op_benchmark(\n session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)\n\n def benchmarkExplicitVsManualPadding(self):\n \"\"\"Compare performance of EXPLICIT padding and calling tf.pad.\n\n A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same\n padding followed by an equivalent Conv2D op is benchmarked.\n \"\"\"\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default():\n burn_iters = 15\n num_iters = 300\n batch_size = 64\n # The input and filter correspond to the first layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 3,\n 224,\n 224\n ]))\n filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64])) # pylint: disable=redefined-builtin\n strides = [1, 1, 2, 2]\n padding = [(0, 0), (0, 0), (3, 3), (3, 3)]\n output_explicit_pad = nn_ops.conv2d(\n input, filter, strides, padding=padding, data_format=\"NCHW\")\n input_padded = array_ops.pad(input, padding)\n output_manual_pad = nn_ops.conv2d(\n input_padded, filter, strides, padding=\"VALID\", data_format=\"NCHW\")\n # Benchmark just the forward pass.\n self._bench_op(\"explicit_pad_forward\", output_explicit_pad.op, burn_iters,\n num_iters)\n self._bench_op(\"manual_pad_forward\", output_manual_pad.op, burn_iters,\n num_iters)\n\n # Benchmark both the forward and backwards passes.\n input_grad_explicit_pad, filter_grad_explicit_pad = (\n gradients_impl.gradients(output_explicit_pad, [input, filter]))\n self._bench_op(\n \"explicit_pad_backward\",\n control_flow_ops.group(input_grad_explicit_pad,\n filter_grad_explicit_pad), burn_iters,\n num_iters)\n input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(\n output_manual_pad, [input, filter])\n self._bench_op(\n \"manual_pad_backward\",\n control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),\n burn_iters, num_iters)\n\n def benchmarkExplicitVsSamePaddingGraph(self):\n \"\"\"Compare performance of EXPLICIT and SAME padding in graph mode.\n\n A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op\n with explicit padding is benchmarked, where the padding is the same as in\n the SAME case. 
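# --- Editor-added illustrative sketch (not part of the original benchmark file). ---
# The benchmark docstrings here compare a Conv2D op using EXPLICIT padding
# against manually padding with tf.pad followed by a VALID convolution. That
# equivalence can be checked directly with the public TensorFlow API. This is
# a minimal sketch assuming TF 2.x eager execution; the names x, w and pad
# below are illustrative only.
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 5, 5, 3).astype(np.float32))   # NHWC input
w = tf.constant(np.random.rand(3, 3, 3, 8).astype(np.float32))   # HWIO filter
pad = [[0, 0], [1, 2], [3, 4], [0, 0]]  # (before, after) padding per dimension

explicit = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding=pad)
manual = tf.nn.conv2d(tf.pad(x, pad), w, strides=[1, 1, 1, 1], padding="VALID")
np.testing.assert_allclose(explicit.numpy(), manual.numpy(), rtol=1e-5, atol=1e-5)
# --- End of editor-added sketch. ---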
The purpose is to ensure EXPLICIT padding is just as\n efficient as the SAME case\n \"\"\"\n if not test.is_gpu_available():\n return\n\n with ops.Graph().as_default():\n burn_iters = 15\n num_convs = 20\n num_iters = 50\n batch_size = 64\n # The input and filter correspond to a middle layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 256,\n 14,\n 14\n ]))\n filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin\n strides = [1, 1, 1, 1]\n padding = [(0, 0), (0, 0), (1, 1), (1, 1)]\n output_explicit_pad = input\n output_same_pad = input\n\n for _ in range(num_convs):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)\n grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)\n self._bench_op(\"graph_explicit_pad\", grad_explicit_pad.op, burn_iters,\n num_iters)\n self._bench_op(\"graph_same_pad\", grad_same_pad.op, burn_iters, num_iters)\n\n def benchmarkExplicitVsSamePaddingEager(self):\n \"\"\"Compare performance of EXPLICIT and SAME padding in eager mode.\n\n A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op\n with explicit padding is benchmarked, where the padding is the same as in\n the SAME case. Currently, EXPLICIT padding is slightly slower, due to the\n fact the Python padding list must be checked and processed before the Conv2D\n op can run.\n \"\"\"\n # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.\n if not test.is_gpu_available():\n return\n\n with context.eager_mode():\n burn_iters = 15\n num_convs = 20\n num_iters = 50\n batch_size = 64\n # The input and filter correspond to a middle layer of Resnet50.\n input = variables.Variable( # pylint: disable=redefined-builtin\n random_ops.random_uniform([\n batch_size,\n 256,\n 14,\n 14\n ]))\n filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256])) # pylint: disable=redefined-builtin\n strides = [1, 1, 1, 1]\n padding = [(0, 0), (0, 0), (1, 1), (1, 1)]\n output_explicit_pad = input\n output_same_pad = input\n for _ in range(burn_iters):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n\n start = time.time()\n for _ in range(num_iters):\n with backprop.GradientTape() as tape:\n for _ in range(num_convs):\n output_explicit_pad = nn_ops.conv2d(\n output_explicit_pad,\n filter,\n strides,\n padding=padding,\n data_format=\"NCHW\")\n tape.gradient(output_explicit_pad, filter)\n end = time.time()\n self.report_benchmark(\n name=\"eager_explicit_pad\",\n wall_time=(end - start) / num_iters,\n iters=num_iters)\n\n start = time.time()\n for _ in range(num_iters):\n with backprop.GradientTape() as tape:\n for _ in range(num_convs):\n output_same_pad = nn_ops.conv2d(\n output_same_pad,\n filter,\n strides,\n padding=\"SAME\",\n data_format=\"NCHW\")\n tape.gradient(output_same_pad, filter)\n end = time.time()\n self.report_benchmark(\n name=\"eager_same_pad\",\n wall_time=(end - start) / num_iters,\n iters=num_iters)\n\n\ndef GetInceptionFwdTest(input_size, filter_size, stride, padding,\n 
gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionFwd %s\", (input_size, filter_size,\n stride, padding))\n return\n tf_logging.info(\"Testing InceptionFwd %s\", (input_size, filter_size, stride,\n padding))\n self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)\n\n return Test\n\n\ndef GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):\n\n def Test(self):\n if stride == 1:\n tf_logging.info(\"Testing InceptionFwd with dilations %s\",\n (input_size, filter_size, stride, padding))\n self._VerifyDilatedConvValues(\n tensor_in_sizes=input_size,\n filter_in_sizes=filter_size,\n strides=[stride, stride],\n dilations=[2, 2],\n padding=padding,\n rtol=5e-4)\n\n return Test\n\n\ndef GetInceptionBackInputTest(input_size, filter_size, output_size, stride,\n padding,\n gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionBackInput %s\",\n (input_size, filter_size, output_size, stride, padding))\n return\n tf_logging.info(\"Testing InceptionBackInput %s\",\n (input_size, filter_size, output_size, stride, padding))\n self._CompareBackpropInput(input_size, filter_size, output_size,\n [stride, stride], padding)\n\n return Test\n\n\ndef GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,\n padding, gpu_only=False):\n\n def Test(self):\n if gpu_only and not test.is_gpu_available():\n tf_logging.info(\"Skipping InceptionBackFilter %s\",\n (input_size, filter_size, output_size, strides, padding))\n return\n tf_logging.info(\"Testing InceptionBackFilter %s\",\n (input_size, filter_size, output_size, strides, padding))\n self._CompareBackFilter(input_size, filter_size, output_size, strides,\n padding)\n\n return Test\n\n\nif __name__ == \"__main__\":\n for index, (input_size_, filter_size_, output_size_, stride_,\n padding_) in enumerate(GetShrunkInceptionShapes()):\n setattr(Conv2DTest, \"testInceptionFwd_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdTest(input_size_, filter_size_, stride_,\n padding_)))\n setattr(\n Conv2DTest, \"testInceptionFwdDilatedConv_\" + str(index),\n test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(\n input_size_, filter_size_, stride_, padding_)))\n setattr(Conv2DTest, \"testInceptionBackInput_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackInputTest(input_size_, filter_size_,\n output_size_, stride_, padding_)))\n setattr(Conv2DTest, \"testInceptionBackFilter_\" + str(index),\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackFilterTest(input_size_, filter_size_,\n output_size_, [stride_, stride_],\n padding_)))\n\n # TODO(b/35359731)\n # Fwd, BckInput, and BackFilter to test that for certain input parameter\n # set, winograd nonfused algorithm will be excluded from conv autotune. 
If\n # in such case, winograd nonfused algorithm is added as one option of the\n # conv autotune, and cuDNN version is smaller than 7, the following tests\n # will fail.\n ishape = [1, 400, 400, 1]\n fshape = [1, 1, 1, 256]\n oshape = [1, 400, 400, 256]\n setattr(Conv2DTest, \"testInceptionFwd_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdTest(ishape, fshape, 1, \"SAME\", gpu_only=True)))\n setattr(Conv2DTest, \"testInceptionFwdDilatedConv_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionFwdDilatedConvTest(ishape, fshape, 1, \"SAME\")))\n setattr(Conv2DTest, \"testInceptionBackInput_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackInputTest(ishape, fshape, oshape, 1, \"SAME\",\n gpu_only=True)))\n setattr(Conv2DTest, \"testInceptionBackFilter_No_Winograd_Nonfused\",\n test_util.run_in_graph_and_eager_modes(\n GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], \"SAME\",\n gpu_only=True)))\n test.main()\n", "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Various classes representing TPU distributed values.\n\nNote that the tests are in values_test.py .\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\n\nfrom tensorflow.python.distribute import packed_distributed_variable as packed\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute import values_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.tpu import tpu\n\n\[email protected]\ndef _maybe_enter_graph(tensor):\n # Note: might have an eager tensor but not be executing eagerly when\n # building functions.\n if (context.executing_eagerly() or isinstance(tensor, ops.EagerTensor) or\n ops.has_default_graph()):\n yield\n else:\n with tensor.graph.as_default():\n yield\n\n\[email protected]\ndef _maybe_on_device(var):\n # Add a device scope for packed variables.\n if isinstance(var, packed.PackedVarAndDevice):\n with ops.device(var.device):\n yield\n else:\n yield\n\n\ndef _make_raw_assign_fn(raw_assign_fn): # pylint: disable=missing-docstring\n\n def assign_fn(var, value, use_locking=False, name=None, read_value=True): # pylint: disable=missing-docstring\n del use_locking # Unused.\n\n handle = var.handle\n with _maybe_enter_graph(handle), _maybe_on_device(var):\n op = raw_assign_fn(\n handle,\n ops.convert_to_tensor(value, dtype=var.dtype),\n name=name)\n with ops.control_dependencies([op]):\n return var._read_variable_op() if read_value else op # pylint: disable=protected-access\n\n return 
assign_fn\n\n\nclass TPUVariableMixin(object):\n \"\"\"Mixin for TPU variables.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TPUVariableMixin, self).__init__(*args, **kwargs)\n\n # Handle ID is needed for `get_replicated_var_handle` to cache the variables\n # correctly since in eager mode different variables can have the same name.\n if ops.executing_eagerly_outside_functions():\n self._handle_id = self._common_name + \"_\" + str(id(self._primary))\n else:\n self._handle_id = self._common_name\n\n def __getattr__(self, name):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).__getattr__(name)\n else:\n raise AttributeError(\n \"'{}' not accessible within a TPU context.\".format(name))\n\n def get(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).get()\n else:\n raise NotImplementedError(\n \"`TPUVariableMixin.get()` is not supported within a TPU context.\")\n\n def _get_as_operand(self):\n return self.read_value()\n\n def _is_mirrored(self):\n raise NotImplementedError(\n \"`TPUVariableMixin._is_mirrored()` must be implemented by subclasses.\")\n\n @property\n def handle(self):\n \"\"\"The handle by which this variable can be accessed.\"\"\"\n # If we're in a tpu.rewrite(), return the replicated handle.\n tpu_context = enclosing_tpu_context()\n if tpu_context is None or context.executing_eagerly():\n return self._get_on_device_or_primary().handle\n else:\n is_packed = self._packed_var is not None\n val = self._values\n if is_packed:\n val = [self._packed_var]\n\n return tpu_context.get_replicated_var_handle(self._handle_id, val,\n self._is_mirrored(),\n is_packed)\n\n @property\n def device(self):\n return self.handle.device\n\n def _read_variable_op(self):\n \"\"\"Reads the value of this variable.\"\"\"\n if self.trainable:\n tape.variable_accessed(self)\n\n handle = self.handle\n if getattr(handle, \"is_packed\", False):\n # Add a device scope for a packed variable handle.\n with ops.device(self._get_on_device_or_primary().device):\n return gen_resource_variable_ops.read_variable_op(handle, self.dtype)\n else:\n return gen_resource_variable_ops.read_variable_op(handle, self.dtype)\n\n def read_value(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).read_value()\n else:\n return self._read_variable_op()\n\n def value(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self).value()\n else:\n return self._read_variable_op()\n\n def _as_graph_element(self):\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self)._as_graph_element() # pylint: disable=protected-access\n else:\n return None\n\n @property\n def op(self):\n if values_util.is_saving_non_distributed():\n return self._primary.op\n return values.DistributedVarOp(self._primary.op.name,\n self._primary.op.graph,\n self._primary.op.traceback,\n self._primary.op.type)\n\n def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n \"\"\"Converts a variable to a tensor.\"\"\"\n # pylint: disable=protected-access\n if enclosing_tpu_context() is None:\n return super(TPUVariableMixin, self)._dense_var_to_tensor(\n dtype=dtype, name=name, as_ref=as_ref)\n # pylint: enable=protected-access\n elif dtype is not None and dtype != self.dtype:\n return math_ops.cast(self.read_value(), dtype)\n else:\n return self.handle if as_ref else self.read_value()\n\n\ndef enclosing_tpu_context():\n \"\"\"Returns the TPUReplicateContext, which exists inside a tpu.rewrite().\"\"\"\n graph = 
ops.get_default_graph()\n while graph is not None:\n # pylint: disable=protected-access\n context_ = graph._get_control_flow_context()\n # pylint: enable=protected-access\n while context_ is not None:\n if isinstance(context_, tpu.TPUReplicateContext):\n return context_\n context_ = context_.outer_context\n # This may be a FuncGraph due to defuns or v2 control flow. We need to\n # find the original graph with the XLAControlFlowContext.\n graph = getattr(graph, \"outer_graph\", None)\n return None\n\n\nclass TPUDistributedVariable(TPUVariableMixin, values.DistributedVariable):\n \"\"\"DistributedVariable subclass for TPUStrategy.\"\"\"\n\n def _is_mirrored(self):\n self._policy._is_mirrored() # pylint: disable=protected-access\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_sub(value, use_locking, name, read_value)\n return self._policy.assign_sub(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign_add(value, use_locking, name, read_value)\n return self._policy.assign_add(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if values_util.is_saving_non_distributed():\n return self._primary.assign(value, use_locking, name, read_value)\n return self._policy.assign(\n self, value, use_locking=use_locking, name=name, read_value=read_value)\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(sparse_delta, use_locking, name)\n return self._policy.scatter_sub(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(sparse_delta, use_locking, name)\n return self._policy.scatter_add(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(sparse_delta, use_locking, name)\n return self._policy.scatter_mul(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_div(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(sparse_delta, use_locking, name)\n return self._policy.scatter_div(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(sparse_delta, use_locking, name)\n return self._policy.scatter_min(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_max(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(sparse_delta, use_locking, name)\n return self._policy.scatter_max(\n self, sparse_delta, use_locking=use_locking, name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(sparse_delta, use_locking, name)\n return self._policy.scatter_update(\n self, 
sparse_delta, use_locking=use_locking, name=name)\n\n\nclass TPUMirroredVariable(TPUVariableMixin, values.MirroredVariable):\n \"\"\"Holds a map from replica to TPU variables whose values are kept in sync.\"\"\"\n\n def assign_sub(self, value, use_locking=False, name=None,\n read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_sub(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, value, use_locking=False, name=None,\n read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_add(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n if (enclosing_tpu_context() and\n self.aggregation == variable_scope.VariableAggregation.NONE):\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)(\n self,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign(self, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_sub(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_add(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_max(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_min(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_mul(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_div(*args, **kwargs)\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n if values_util.is_saving_non_distributed():\n return self._primary.scatter_update(*args, **kwargs)\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUSyncOnReadVariable(TPUVariableMixin, values.SyncOnReadVariable):\n \"\"\"Holds a map from replica to variables whose values are reduced on save.\"\"\"\n\n def assign_sub(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return values.SyncOnReadVariable.assign_sub(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(self, *args,\n **kwargs)\n\n def assign_add(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return values.SyncOnReadVariable.assign_add(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(self, *args,\n **kwargs)\n\n def assign(self, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return 
values.SyncOnReadVariable.assign(self, *args, **kwargs)\n else:\n return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(\n self, *args, **kwargs)\n\n def _is_mirrored(self):\n return False\n\n\n# Common method between AutoPolicy, OnWrite and Mirrored variables.\ndef assign_sub(var, value, use_locking=False, name=None, read_value=True):\n assign_sub_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_sub_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\ndef assign_add(var, value, use_locking=False, name=None, read_value=True):\n assign_add_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_add_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\ndef assign(var, value, use_locking=False, name=None, read_value=True):\n assign_fn = _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)\n return var._update( # pylint: disable=protected-access\n update_fn=assign_fn,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n\n\nclass TPUAutoPolicy(values.AutoPolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.AUTO` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.AUTO` and `aggregation` is set to\n `tf.VariableAggregation.NONE` when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_sub(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, var, value, use_locking=False, name=None,\n read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign_add(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, var, value, use_locking=False, name=None, read_value=True):\n if enclosing_tpu_context():\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_variable_op)(\n var,\n value=value,\n use_locking=use_locking,\n name=name,\n read_value=read_value)\n return assign(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUOnWritePolicy(values.OnWritePolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_WRITE` synchronization.\n\n This policy is created when the following `synchronization` and\n `aggregation` parameters are specified when creating a 
`tf.Variable` in\n `tf.distribute` scope:\n * `synchronization` is equal to `tf.VariableSynchronization.AUTO` and\n aggregation can be any of the following `tf.VariableAggregation` enum\n values such as `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n * `synchronization` is equal to `tf.VariableSynchronization.ON_WRITE` and\n aggregation can be any of the following `tf.VariableAggregation` enum\n values such as `NONE`, `SUM`, `MEAN` or `ONLY_FIRST_REPLICA`.\n \"\"\"\n\n def assign_sub(self, var, value, use_locking=False, name=None,\n read_value=True):\n return assign_sub(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign_add(self, var, value, use_locking=False, name=None,\n read_value=True):\n return assign_add(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def assign(self, var, value, use_locking=False, name=None, read_value=True):\n return assign(var, value, use_locking=use_locking, name=name,\n read_value=read_value)\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n\n def _is_mirrored(self):\n return True\n\n\nclass TPUOnReadPolicy(values.OnReadPolicy):\n \"\"\"Policy defined for `tf.VariableSynchronization.ON_READ` synchronization.\n\n This policy is created when `synchronization` is set to\n `tf.VariableSynchronization.ON_READ` and `aggregation` is set to any of the\n values allowed by the `tf.VariableAggregation` enum such as `NONE`, `SUM`,\n `MEAN` or `ONLY_FIRST_REPLICA`when creating a `tf.Variable` in `tf.distribute`\n scope.\n \"\"\"\n\n def assign_sub(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign_sub(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_sub_variable_op)(var, *args,\n **kwargs)\n\n def assign_add(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign_add(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(\n gen_resource_variable_ops.assign_add_variable_op)(var, *args,\n **kwargs)\n\n def assign(self, var, *args, **kwargs):\n if enclosing_tpu_context() is None:\n return super(TPUOnReadPolicy, self).assign(var, *args, **kwargs)\n else:\n return _make_raw_assign_fn(gen_resource_variable_ops.assign_variable_op)(\n var, *args, **kwargs)\n\n def _is_mirrored(self):\n return False\n\n def scatter_sub(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_add(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_max(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_min(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_mul(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_div(self, *args, **kwargs):\n raise NotImplementedError\n\n def scatter_update(self, *args, **kwargs):\n raise NotImplementedError\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks using custom training loop on MNIST dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport timeit\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras.benchmarks import distribution_util\n\n\nclass CustomMnistBenchmark(tf.test.Benchmark):\n \"\"\"Benchmarks for custom training loop using `tf.test.Benchmark`.\"\"\"\n\n def __init__(self):\n super(CustomMnistBenchmark, self).__init__()\n self.num_classes = 10\n self.input_shape = (28, 28, 1)\n self.epochs = 15\n (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()\n x_train = x_train.astype('float32') / 255\n x_train = np.expand_dims(x_train, -1)\n y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)\n self.num_examples = x_train.shape[0]\n # Use `tf.data.Dataset` for custom training loop.\n self.train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n\n def _build_model(self):\n \"\"\"Model from https://keras.io/examples/vision/mnist_convnet/.\"\"\"\n model = tf.keras.Sequential([\n tf.keras.Input(shape=self.input_shape),\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(self.num_classes, activation='softmax'),\n ])\n\n return model\n\n def compute_loss(self, targets, predictions, loss_fn, batch_size):\n \"\"\"Compute average loss.\"\"\"\n per_example_loss = loss_fn(targets, predictions)\n return tf.nn.compute_average_loss(\n per_example_loss, global_batch_size=batch_size)\n\n @tf.function(experimental_relax_shapes=True)\n def train_step(self, inputs, model, loss_fn, optimizer, batch_size):\n \"\"\"Compute loss and optimize model by optimizer.\n\n Arguments:\n inputs: `tf.data`.\n model: See `model` in `train_function()` method.\n loss_fn: See `loss_fn` in `train_function()` method.\n optimizer: See `optimizer` in `train_function()` method.\n batch_size: See `batch_size` in `train_function()` method.\n\n Returns:\n Loss value.\n \"\"\"\n train_x, train_y = inputs\n with tf.GradientTape() as tape:\n predictions = model(train_x, training=True)\n loss = self.compute_loss(train_y, predictions, loss_fn, batch_size)\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n return loss\n\n @tf.function(experimental_relax_shapes=True)\n def distributed_train_step(self, batch_dataset, model, loss_fn, optimizer,\n batch_size, distribution_strategy):\n \"\"\"Train step in distribution strategy setting.\n\n Arguments:\n batch_dataset: `tf.data`.\n model: See `model` in `train_function()` method.\n loss_fn: See 
`loss_fn` in `train_function()` method.\n optimizer: See `optimizer` in `train_function()` method.\n batch_size: See `batch_size` in `train_function()` method.\n distribution_strategy: See `distribution_strategy` in `train_function()`\n method.\n\n Returns:\n Sum of per_replica_losses.\n \"\"\"\n per_replica_losses = distribution_strategy.run(\n self.train_step,\n args=(\n batch_dataset,\n model,\n loss_fn,\n optimizer,\n batch_size,\n ))\n return distribution_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n\n def train_function(self,\n model,\n train_dataset,\n loss_fn,\n optimizer,\n epochs=2,\n distribution_strategy=None,\n batch_size=256):\n \"\"\"Train model in custom training loop and return average\n\n train_step_time.\n\n Arguments:\n model: Model function to be benchmarked.\n train_dataset: `tf.data` dataset. Should return a tuple of either (inputs,\n targets) or (inputs, targets, sample_weights).\n loss_fn: `tf.keras.losses.Loss` instance.\n optimizer: `tf.keras.optimizers` instance.\n epochs: Integer. Number of epochs to train the model. If unspecified,\n `epochs` will default to 2.\n distribution_strategy: Distribution strategies. It could be\n `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,\n `distribution_strategy` will default to 'off'. Note that, `TPU` and\n `parameter_server` are not supported yet.\n batch_size: Integer. Number of samples per gradient update. If\n unspecified, `batch_size` will default to 32.\n\n Returns:\n Average train_step_time.\n \"\"\"\n train_step_time_list = []\n timer = timeit.default_timer\n\n total_loss = 0.0\n num_batches = 0\n for _ in range(epochs):\n # Iterate over the batches of the dataset.\n for batch_dataset in train_dataset:\n\n start_time = timer()\n\n if distribution_strategy is not None:\n total_loss += self.distributed_train_step(batch_dataset, model,\n loss_fn, optimizer,\n batch_size,\n distribution_strategy)\n else:\n total_loss += self.train_step(batch_dataset, model, loss_fn,\n optimizer, batch_size)\n num_batches += 1\n\n end_time = timer()\n train_step_time_list.append(end_time - start_time)\n\n return np.mean(train_step_time_list)\n\n def measure_performance(self,\n model,\n dataset,\n loss_fn,\n optimizer,\n batch_size=32,\n run_iters=4,\n epochs=10,\n distribution_strategy=None):\n \"\"\"Run models and measure the performance.\n\n Arguments:\n model_fn: Model function to be benchmarked.\n dataset: `tf.data` dataset. Should return a tuple of either (inputs,\n targets) or (inputs, targets, sample_weights).\n loss_fn: `tf.keras.losses.Loss` instance.\n optimizer: `tf.keras.optimizers` instance.\n batch_size: Integer. Number of samples per gradient update. If\n unspecified, `batch_size` will default to 32.\n run_iters: Integer. Number of iterations to run the performance\n measurement. If unspecified, `run_iters` will default to 4.\n epochs: Integer. Number of epochs to train the model. If unspecified,\n `epochs` will default to 10.\n distribution_strategy: Distribution strategies. It could be\n `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,\n `distribution_strategy` will default to 'off'. 
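# --- Editor-added usage sketch (not part of the original benchmark file). ---
# The surrounding docstrings describe how measure_performance() drives the
# custom training loop. Assuming CustomMnistBenchmark is importable or defined
# in scope, it can also be invoked directly, mirroring what the
# benchmark_custom_training_mnist_bs_* methods do; run_iters=1 and epochs=1
# are chosen here only to keep the sketch cheap, not taken from the source.
import tensorflow as tf

bench = CustomMnistBenchmark()
dataset = bench.train_dataset.shuffle(buffer_size=1024).batch(128)
loss_fn = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)
optimizer = tf.keras.optimizers.Adam()
model = bench._build_model()
metrics, wall_time = bench.measure_performance(
    model, dataset, loss_fn, optimizer,
    batch_size=128, run_iters=1, epochs=1)
# --- End of editor-added sketch. ---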
Note that, `TPU` and\n `parameter_server` are not supported yet.\n\n Returns:\n Performance summary, which contains build_time, avg_epoch_time,\n wall_time, exp_per_sec, epochs, warmup_time, train_step_time.\n\n Raise:\n ValueError: if `dataset` is None or if `optimizer` instance is\n not provided or if `loss_fn` instance is not provided.\n \"\"\"\n if distribution_strategy is not None and \\\n not isinstance(dataset, tf.distribute.DistributedDataset):\n raise ValueError('tf.distribute.DistributedDataset'\n ' required in distribution strategy.')\n\n if distribution_strategy is None and \\\n not isinstance(dataset, tf.data.Dataset):\n raise ValueError('`tf.data` is required.')\n\n if not isinstance(loss_fn, tf.keras.losses.Loss):\n raise ValueError('`tf.keras.losses.Loss` instance '\n 'for loss_fn is required.')\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise ValueError('`tf.keras.optimizers` instance '\n 'for optimizer is required.')\n\n avg_epoch_time_list, train_step_time_list = [], []\n wall_time_list, exp_per_sec_list, warmup_time_list = [], [], []\n\n total_num_examples = epochs * self.num_examples\n\n for _ in range(run_iters):\n timer = timeit.default_timer\n start_time = timer()\n t1 = timer()\n self.train_function(model, dataset, loss_fn, optimizer, 1,\n distribution_strategy, batch_size)\n warmup_time = timer() - t1\n\n t2 = timer()\n train_step_time = self.train_function(model, dataset, loss_fn, optimizer,\n epochs, distribution_strategy,\n batch_size)\n end_time = timer()\n\n train_step_time_list.append(train_step_time)\n warmup_time_list.append(warmup_time)\n wall_time_list.append(end_time - start_time)\n exp_per_sec_list.append(total_num_examples / (end_time - t2))\n avg_epoch_time_list.append((end_time - t2) / epochs)\n\n metrics = []\n metrics.append({\n 'name': 'avg_epoch_time',\n 'value': np.mean(avg_epoch_time_list)\n })\n metrics.append({'name': 'exp_per_sec', 'value': np.mean(exp_per_sec_list)})\n metrics.append({'name': 'warmup_time', 'value': np.mean(warmup_time_list)})\n metrics.append({\n 'name': 'train_step_time',\n 'value': np.mean(train_step_time_list)\n })\n metrics.append({'name': 'epochs', 'value': epochs})\n\n wall_time = np.mean(wall_time_list)\n\n return metrics, wall_time\n\n def benchmark_custom_training_mnist_bs_128(self):\n \"\"\"Measure performance with batch_size=128 and run_iters=5.\"\"\"\n batch_size = 128\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n self.report_benchmark(iters=run_iters, wall_time=wall_time, metrics=metrics)\n\n def benchmark_custom_training_mnist_bs_256(self):\n \"\"\"Measure performance with batch_size=256 and run_iters=5.\"\"\"\n batch_size = 256\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, 
loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n self.report_benchmark(iters=run_iters, wall_time=wall_time, metrics=metrics)\n\n def benchmark_custom_training_mnist_bs_512(self):\n \"\"\"Measure performance with batch_size=512 and run_iters=10.\"\"\"\n batch_size = 512\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n self.report_benchmark(iters=run_iters, wall_time=wall_time, metrics=metrics)\n\n def benchmark_custom_training_mnist_bs_512_gpu_2(self):\n \"\"\"Measure performance with batch_size=512, run_iters=10, gpu=2 and\n\n distribution_strategy='mirrored'.\n \"\"\"\n batch_size = 512\n run_iters = 10\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n distribution_strategy = 'mirrored'\n\n strategy = distribution_util.get_distribution_strategy(\n distribution_strategy=distribution_strategy, num_gpus=2)\n\n if distribution_strategy != 'off':\n train_dataset = strategy.experimental_distribute_dataset(train_dataset)\n\n strategy_scope = distribution_util.get_strategy_scope(strategy)\n\n with strategy_scope:\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs,\n strategy)\n self.report_benchmark(iters=run_iters, wall_time=wall_time, metrics=metrics)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Utilities related to layer/model functionality.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport weakref\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.utils.conv_utils import convert_kernel\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.utils.get_source_inputs')\ndef get_source_inputs(tensor, layer=None, node_index=None):\n \"\"\"Returns the list of input tensors necessary to compute `tensor`.\n\n Output will always be a list of tensors\n (potentially with 1 element).\n\n Arguments:\n tensor: The tensor to start from.\n layer: Origin layer of the tensor. Will be\n determined via tensor._keras_history if not provided.\n node_index: Origin node index of the tensor.\n\n Returns:\n List of input tensors.\n \"\"\"\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if node.is_input:\n # Reached an Input layer, stop recursion.\n return nest.flatten(node.input_tensors)\n else:\n source_tensors = []\n for layer, node_index, _, tensor in node.iterate_inbound():\n previous_sources = get_source_inputs(tensor, layer, node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if all(x is not t for t in source_tensors):\n source_tensors.append(x)\n return source_tensors\n\n\ndef validate_string_arg(input_data,\n allowable_strings,\n layer_name,\n arg_name,\n allow_none=False,\n allow_callables=False):\n \"\"\"Validates the correctness of a string-based arg.\"\"\"\n if allow_none and input_data is None:\n return\n elif allow_callables and callable(input_data):\n return\n elif isinstance(input_data,\n six.string_types) and input_data in allowable_strings:\n return\n else:\n allowed_args = '`None`, ' if allow_none else ''\n allowed_args += 'a `Callable`, ' if allow_callables else ''\n allowed_args += 'or one of the following values: %s' % (allowable_strings,)\n raise ValueError((\"%s's %s arg received an invalid value %s. 
\" +\n 'Allowed values are %s.') %\n (layer_name, arg_name, input_data, allowed_args))\n\n\ndef count_params(weights):\n \"\"\"Count the total number of scalars composing the weights.\n\n Arguments:\n weights: An iterable containing the weights on which to compute params\n\n Returns:\n The total number of scalars composing the weights\n \"\"\"\n unique_weights = {id(w): w for w in weights}.values()\n weight_shapes = [w.shape.as_list() for w in unique_weights]\n standardized_weight_shapes = [\n [0 if w_i is None else w_i for w_i in w] for w in weight_shapes\n ]\n return int(sum(np.prod(p) for p in standardized_weight_shapes))\n\n\ndef print_summary(model, line_length=None, positions=None, print_fn=None):\n \"\"\"Prints a summary of a model.\n\n Arguments:\n model: Keras model instance.\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements in each line.\n If not provided, defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n It defaults to `print` (prints to stdout).\n \"\"\"\n if print_fn is None:\n print_fn = print\n\n if model.__class__.__name__ == 'Sequential':\n sequential_like = True\n elif not model._is_graph_network:\n # We treat subclassed models as a simple sequence of layers, for logging\n # purposes.\n sequential_like = True\n else:\n sequential_like = True\n nodes_by_depth = model._nodes_by_depth.values()\n nodes = []\n for v in nodes_by_depth:\n if (len(v) > 1) or (len(v) == 1 and\n len(nest.flatten(v[0].keras_inputs)) > 1):\n # if the model has multiple nodes\n # or if the nodes have multiple inbound_layers\n # the model is no longer sequential\n sequential_like = False\n break\n nodes += v\n if sequential_like:\n # search for shared layers\n for layer in model.layers:\n flag = False\n for node in layer._inbound_nodes:\n if node in nodes:\n if flag:\n sequential_like = False\n break\n else:\n flag = True\n if not sequential_like:\n break\n\n if sequential_like:\n line_length = line_length or 65\n positions = positions or [.45, .85, 1.]\n if positions[-1] <= 1:\n positions = [int(line_length * p) for p in positions]\n # header names for the different log elements\n to_display = ['Layer (type)', 'Output Shape', 'Param #']\n else:\n line_length = line_length or 98\n positions = positions or [.33, .55, .67, 1.]\n if positions[-1] <= 1:\n positions = [int(line_length * p) for p in positions]\n # header names for the different log elements\n to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']\n relevant_nodes = []\n for v in model._nodes_by_depth.values():\n relevant_nodes += v\n\n def print_row(fields, positions):\n line = ''\n for i in range(len(fields)):\n if i > 0:\n line = line[:-1] + ' '\n line += str(fields[i])\n line = line[:positions[i]]\n line += ' ' * (positions[i] - len(line))\n print_fn(line)\n\n print_fn('Model: \"{}\"'.format(model.name))\n print_fn('_' * line_length)\n print_row(to_display, positions)\n print_fn('=' * line_length)\n\n def print_layer_summary(layer):\n \"\"\"Prints a summary for a single layer.\n\n Arguments:\n layer: target layer.\n \"\"\"\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n except RuntimeError: # output_shape unknown in Eager mode.\n output_shape = '?'\n name = layer.name\n cls_name = 
layer.__class__.__name__\n fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]\n print_row(fields, positions)\n\n def print_layer_summary_with_connections(layer):\n \"\"\"Prints a summary for a single layer (including topological connections).\n\n Arguments:\n layer: target layer.\n \"\"\"\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n connections = []\n for node in layer._inbound_nodes:\n if relevant_nodes and node not in relevant_nodes:\n # node is not part of the current network\n continue\n\n for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():\n connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index,\n tensor_index))\n\n name = layer.name\n cls_name = layer.__class__.__name__\n if not connections:\n first_connection = ''\n else:\n first_connection = connections[0]\n fields = [\n name + ' (' + cls_name + ')', output_shape,\n layer.count_params(), first_connection\n ]\n print_row(fields, positions)\n if len(connections) > 1:\n for i in range(1, len(connections)):\n fields = ['', '', '', connections[i]]\n print_row(fields, positions)\n\n layers = model.layers\n for i in range(len(layers)):\n if sequential_like:\n print_layer_summary(layers[i])\n else:\n print_layer_summary_with_connections(layers[i])\n if i == len(layers) - 1:\n print_fn('=' * line_length)\n else:\n print_fn('_' * line_length)\n\n if hasattr(model, '_collected_trainable_weights'):\n trainable_count = count_params(model._collected_trainable_weights)\n else:\n trainable_count = count_params(model.trainable_weights)\n\n non_trainable_count = count_params(model.non_trainable_weights)\n\n print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))\n print_fn('Trainable params: {:,}'.format(trainable_count))\n print_fn('Non-trainable params: {:,}'.format(non_trainable_count))\n print_fn('_' * line_length)\n\n\ndef gather_trainable_weights(trainable, sub_layers, extra_variables):\n \"\"\"Lists the trainable weights for an object with sub-layers.\n\n Args:\n trainable: Whether the object collecting the variables is trainable.\n sub_layers: A flat list of Layer objects owned by this object, to collect\n variables from.\n extra_variables: Any extra variables to include. Their `.trainable` property\n is used to categorize them.\n\n Returns:\n A list of collected trainable weights/variables.\n \"\"\"\n if not trainable:\n return []\n weights = []\n for layer in sub_layers:\n weights += layer.trainable_weights\n trainable_extra_variables = [\n v for v in extra_variables if v.trainable]\n return weights + trainable_extra_variables\n\n\ndef gather_non_trainable_weights(trainable, sub_layers, extra_variables):\n \"\"\"Lists the non-trainable weights for an object with sub-layers.\n\n Args:\n trainable: Whether the object collecting the variables is trainable.\n sub_layers: A flat list of Layer objects owned by this object, to collect\n variables from.\n extra_variables: Any extra variables to include. 
Their `.trainable` property\n is used to categorize them.\n\n Returns:\n A list of collected non-trainable weights/variables.\n \"\"\"\n trainable_extra_variables = []\n non_trainable_extra_variables = []\n for v in extra_variables:\n if v.trainable:\n trainable_extra_variables.append(v)\n else:\n non_trainable_extra_variables.append(v)\n weights = []\n for layer in sub_layers:\n weights += layer.non_trainable_weights\n if not trainable:\n trainable_weights = []\n for layer in sub_layers:\n trainable_weights += layer.trainable_weights\n return (trainable_weights + trainable_extra_variables\n + weights + non_trainable_extra_variables)\n return weights + non_trainable_extra_variables\n\n\[email protected]('2020-06-23',\n 'The Theano kernel format is legacy; '\n 'this utility will be removed.')\n@keras_export('keras.utils.convert_all_kernels_in_model')\ndef convert_all_kernels_in_model(model):\n \"\"\"Converts all convolution kernels in a model from Theano to TensorFlow.\n\n Also works from TensorFlow to Theano.\n\n This is used for converting legacy Theano-saved model files.\n\n Arguments:\n model: target model for the conversion.\n \"\"\"\n # Note: SeparableConvolution not included\n # since only supported by TF.\n conv_classes = {\n 'Conv1D',\n 'Conv2D',\n 'Conv3D',\n 'Conv2DTranspose',\n }\n to_assign = []\n for layer in model.layers:\n if layer.__class__.__name__ in conv_classes:\n original_kernel = K.get_value(layer.kernel)\n converted_kernel = convert_kernel(original_kernel)\n to_assign.append((layer.kernel, converted_kernel))\n K.batch_set_value(to_assign)\n\n\ndef convert_dense_weights_data_format(dense,\n previous_feature_map_shape,\n target_data_format='channels_first'):\n \"\"\"Utility useful when changing a convnet's `data_format`.\n\n When porting the weights of a convnet from one data format to the other,\n if the convnet includes a `Flatten` layer\n (applied to the last convolutional feature map)\n followed by a `Dense` layer, the weights of that `Dense` layer\n should be updated to reflect the new dimension ordering.\n\n Arguments:\n dense: The target `Dense` layer.\n previous_feature_map_shape: A shape tuple of 3 integers,\n e.g. `(512, 7, 7)`. 
The shape of the convolutional\n feature map right before the `Flatten` layer that\n came before the target `Dense` layer.\n target_data_format: One of \"channels_last\", \"channels_first\".\n Set it \"channels_last\"\n if converting a \"channels_first\" model to \"channels_last\",\n or reciprocally.\n \"\"\"\n assert target_data_format in {'channels_last', 'channels_first'}\n kernel, bias = dense.get_weights()\n for i in range(kernel.shape[1]):\n if target_data_format == 'channels_first':\n c, h, w = previous_feature_map_shape\n original_fm_shape = (h, w, c)\n ki = kernel[:, i].reshape(original_fm_shape)\n ki = np.transpose(ki, (2, 0, 1)) # last -> first\n else:\n h, w, c = previous_feature_map_shape\n original_fm_shape = (c, h, w)\n ki = kernel[:, i].reshape(original_fm_shape)\n ki = np.transpose(ki, (1, 2, 0)) # first -> last\n kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))\n dense.set_weights([kernel, bias])\n\n\ndef is_builtin_layer(layer):\n if not getattr(layer, '_keras_api_names', None):\n return False\n\n # Subclasses of `Layer` that are not exported inherit the export name\n # of the base layer class.\n return (layer._keras_api_names != ('keras.layers.Layer',) and\n layer._keras_api_names_v1 != ('keras.layers.Layer',))\n\n\ndef cached_per_instance(f):\n \"\"\"Lightweight decorator for caching lazily constructed properties.\n\n When to use:\n This decorator provides simple caching with minimal overhead. It is designed\n for properties which are expensive to compute and static over the life of a\n class instance, and provides no mechanism for cache invalidation. Thus it is\n best suited for lazily exposing derived properties of other static data.\n\n For classes with custom getattr / setattr behavior (such as trackable\n objects), storing cache results as object attributes is not performant.\n Instead, a specialized cache can significantly reduce property lookup\n overhead. (While still allowing the decorated property to be lazily computed.)\n Consider the following class:\n\n ```\n class MyClass(object):\n def __setattr__(self, key, value):\n # Some expensive class specific code\n # ...\n # ...\n\n super(MyClass, self).__setattr__(key, value)\n\n @property\n def thing(self):\n # `thing` is expensive to compute (and may not even be requested), so we\n # want to lazily compute it and then cache it.\n output = getattr(self, '_thing', None)\n if output is None:\n self._thing = output = compute_thing(self)\n return output\n ```\n\n It's also worth noting that ANY overriding of __setattr__, even something as\n simple as:\n ```\n def __setattr__(self, key, value):\n super(MyClass, self).__setattr__(key, value)\n ```\n\n Slows down attribute assignment by nearly 10x.\n\n By contrast, replacing the definition of `thing` with the following sidesteps\n the expensive __setattr__ altogether:\n\n '''\n @property\n @tracking.cached_per_instance\n def thing(self):\n # `thing` is expensive to compute (and may not even be requested), so we\n # want to lazily compute it and then cache it.\n return compute_thing(self)\n '''\n\n Performance:\n The overhead for this decorator is ~0.4 us / call. 
A much lower overhead\n implementation (~0.085 us / call) can be achieved by using a custom dict type:\n\n ```\n def dict_based_cache(f):\n class Cache(dict):\n __slots__ = ()\n def __missing__(self, key):\n self[key] = output = f(key)\n return output\n\n return property(Cache().__getitem__)\n ```\n\n However, that implementation holds class instances as keys, and as a result\n blocks garbage collection. (And modifying it to use weakref's as keys raises\n the lookup overhead to ~0.4 us) As a result, the WeakKeyDictionary\n implementation below turns out to be more prudent.\n\n Args:\n f: The function to cache.\n\n Returns:\n f decorated with simple caching behavior.\n \"\"\"\n\n cache = weakref.WeakKeyDictionary()\n\n @functools.wraps(f)\n def wrapped(item):\n output = cache.get(item)\n if output is None:\n cache[item] = output = f(item)\n return output\n\n wrapped.cache = cache\n return wrapped\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class MirroredStrategy implementing tf.distribute.Strategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import distribute_utils\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_run\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tf_device\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n# TODO(josh11b): Replace asserts in this file with if ...: raise ...\n\n\ndef _is_device_list_single_worker(devices):\n \"\"\"Checks whether the devices list is for single or multi-worker.\n\n Args:\n devices: a list of device strings or tf.config.LogicalDevice objects, for\n either local or for remote devices.\n\n Returns:\n a boolean indicating whether these device strings are for local or for\n remote.\n\n Raises:\n ValueError: if device strings are not consistent.\n \"\"\"\n specs = []\n for d in devices:\n 
name = d.name if isinstance(d, context.LogicalDevice) else d\n specs.append(tf_device.DeviceSpec.from_string(name))\n num_workers = len({(d.job, d.task, d.replica) for d in specs})\n all_local = all(d.job in (None, \"localhost\") for d in specs)\n any_local = any(d.job in (None, \"localhost\") for d in specs)\n\n if any_local and not all_local:\n raise ValueError(\"Local device string cannot have job specified other \"\n \"than 'localhost'\")\n\n if num_workers == 1 and not all_local:\n if any(d.task is None for d in specs):\n raise ValueError(\"Remote device string must have task specified.\")\n\n return num_workers == 1\n\n\ndef _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):\n \"\"\"Returns a device list given a cluster spec.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n devices = []\n for task_type in (\"chief\", \"worker\"):\n for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):\n if num_gpus_per_worker == 0:\n devices.append(\"/job:%s/task:%d/device:CPU:0\" % (task_type, task_id))\n else:\n devices.extend([\n \"/job:%s/task:%d/device:GPU:%i\" % (task_type, task_id, gpu_id)\n for gpu_id in range(num_gpus_per_worker)\n ])\n return devices\n\n\ndef _group_device_list(devices):\n \"\"\"Groups the devices list by task_type and task_id.\n\n Args:\n devices: a list of device strings for remote devices.\n\n Returns:\n a dict of list of device strings mapping from task_type to a list of devices\n for the task_type in the ascending order of task_id.\n \"\"\"\n assert not _is_device_list_single_worker(devices)\n device_dict = {}\n\n for d in devices:\n d_spec = tf_device.DeviceSpec.from_string(d)\n\n # Create an entry for the task_type.\n if d_spec.job not in device_dict:\n device_dict[d_spec.job] = []\n\n # Fill the device list for task_type until it covers the task_id.\n while len(device_dict[d_spec.job]) <= d_spec.task:\n device_dict[d_spec.job].append([])\n\n device_dict[d_spec.job][d_spec.task].append(d)\n\n return device_dict\n\n\ndef _is_gpu_device(device):\n return tf_device.DeviceSpec.from_string(device).device_type == \"GPU\"\n\n\ndef _infer_num_gpus_per_worker(devices):\n \"\"\"Infers the number of GPUs on each worker.\n\n Currently to make multi-worker cross device ops work, we need all workers to\n have the same number of GPUs.\n\n Args:\n devices: a list of device strings, can be either local devices or remote\n devices.\n\n Returns:\n number of GPUs per worker.\n\n Raises:\n ValueError if workers have different number of GPUs or GPU indices are not\n consecutive and starting from 0.\n \"\"\"\n if _is_device_list_single_worker(devices):\n return sum(1 for d in devices if _is_gpu_device(d))\n else:\n device_dict = _group_device_list(devices)\n num_gpus = None\n for _, devices_in_task in device_dict.items():\n for device_in_task in devices_in_task:\n if num_gpus is None:\n num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))\n\n # Verify other workers have the same number of GPUs.\n elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):\n raise ValueError(\"All workers should have the same number of GPUs.\")\n\n for d in device_in_task:\n d_spec = tf_device.DeviceSpec.from_string(d)\n if (d_spec.device_type == \"GPU\" and\n d_spec.device_index >= num_gpus):\n raise ValueError(\"GPU `device_index` on a worker should be \"\n \"consecutive and start from 0.\")\n return num_gpus\n\n\ndef all_local_devices(num_gpus=None):\n devices = config.list_logical_devices(\"GPU\")\n if num_gpus is not 
None:\n devices = devices[:num_gpus]\n return devices or config.list_logical_devices(\"CPU\")\n\n\ndef all_devices():\n devices = []\n tfconfig = TFConfigClusterResolver()\n if tfconfig.cluster_spec().as_dict():\n devices = _cluster_spec_to_device_list(tfconfig.cluster_spec(),\n context.num_gpus())\n return devices if devices else all_local_devices()\n\n\n@tf_export(\"distribute.MirroredStrategy\", v1=[]) # pylint: disable=g-classes-have-attributes\nclass MirroredStrategy(distribute_lib.Strategy):\n \"\"\"Synchronous training across multiple replicas on one machine.\n\n This strategy is typically used for training on one\n machine with multiple GPUs. For TPUs, use\n `tf.distribute.TPUStrategy`. To use `MirroredStrategy` with multiple workers,\n please refer to `tf.distribute.experimental.MultiWorkerMirroredStrategy`.\n\n For example, a variable created under a `MirroredStrategy` is a\n `MirroredVariable`. If no devices are specified in the constructor argument of\n the strategy then it will use all the available GPUs. If no GPUs are found, it\n will use the available CPUs. Note that TensorFlow treats all CPUs on a\n machine as a single device, and uses threads internally for parallelism.\n\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> with strategy.scope():\n ... x = tf.Variable(1.)\n >>> x\n MirroredVariable:{\n 0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,\n 1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>\n }\n\n While using distribution strategies, all the variable creation should be done\n within the strategy's scope. This will replicate the variables across all the\n replicas and keep them in sync using an all-reduce algorithm.\n\n Variables created inside a `MirroredStrategy` which is wrapped with a\n `tf.function` are still `MirroredVariables`.\n\n >>> x = []\n >>> @tf.function # Wrap the function with tf.function.\n ... def create_variable():\n ... if not x:\n ... x.append(tf.Variable(1.))\n ... return x[0]\n >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"])\n >>> with strategy.scope():\n ... _ = create_variable()\n ... print(x[0])\n MirroredVariable:{\n 0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,\n 1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>\n }\n\n `experimental_distribute_dataset` can be used to distribute the dataset across\n the replicas when writing your own training loop. If you are using `.fit` and\n `.compile` methods available in `tf.keras`, then `tf.keras` will handle the\n distribution for you.\n\n For example:\n\n ```python\n my_strategy = tf.distribute.MirroredStrategy()\n with my_strategy.scope():\n @tf.function\n def distribute_train_epoch(dataset):\n def replica_fn(input):\n # process input and return result\n return result\n\n total_result = 0\n for x in dataset:\n per_replica_result = my_strategy.run(replica_fn, args=(x,))\n total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM,\n per_replica_result, axis=None)\n return total_result\n\n dist_dataset = my_strategy.experimental_distribute_dataset(dataset)\n for _ in range(EPOCHS):\n train_result = distribute_train_epoch(dist_dataset)\n ```\n\n Args:\n devices: a list of device strings such as `['/gpu:0', '/gpu:1']`. If\n `None`, all available GPUs are used. If no GPUs are found, CPU is used.\n cross_device_ops: optional, a descedant of `CrossDeviceOps`. If this is not\n set, `NcclAllReduce()` will be used by default. 
One would customize this\n if NCCL isn't available or if a special implementation that exploits\n the particular hardware is available.\n \"\"\"\n\n def __init__(self, devices=None, cross_device_ops=None):\n extended = MirroredExtended(\n self, devices=devices, cross_device_ops=cross_device_ops)\n super(MirroredStrategy, self).__init__(extended)\n distribute_lib.distribution_strategy_gauge.get_cell(\"V2\").set(\n \"MirroredStrategy\")\n\n\n@tf_export(v1=[\"distribute.MirroredStrategy\"])\nclass MirroredStrategyV1(distribute_lib.StrategyV1): # pylint: disable=g-missing-docstring\n\n __doc__ = MirroredStrategy.__doc__\n\n def __init__(self, devices=None, cross_device_ops=None):\n extended = MirroredExtended(\n self, devices=devices, cross_device_ops=cross_device_ops)\n super(MirroredStrategyV1, self).__init__(extended)\n distribute_lib.distribution_strategy_gauge.get_cell(\"V1\").set(\n \"MirroredStrategy\")\n\n\n# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.\nclass MirroredExtended(distribute_lib.StrategyExtendedV1):\n \"\"\"Implementation of MirroredStrategy.\"\"\"\n\n def __init__(self, container_strategy, devices=None, cross_device_ops=None):\n super(MirroredExtended, self).__init__(container_strategy)\n if context.executing_eagerly():\n if devices and not _is_device_list_single_worker(devices):\n raise RuntimeError(\"In-graph multi-worker training with \"\n \"`MirroredStrategy` is not supported in eager mode.\")\n else:\n if TFConfigClusterResolver().cluster_spec().as_dict():\n # if you are executing in eager mode, only the single machine code\n # path is supported.\n logging.info(\"Initializing local devices since in-graph multi-worker \"\n \"training with `MirroredStrategy` is not supported in \"\n \"eager mode. TF_CONFIG will be ignored when \"\n \"when initializing `MirroredStrategy`.\")\n devices = devices or all_local_devices()\n else:\n devices = devices or all_devices()\n\n assert devices, (\"Got an empty `devices` list and unable to recognize \"\n \"any local devices.\")\n self._cross_device_ops = cross_device_ops\n self._initialize_strategy(devices)\n\n # TODO(b/128995245): Enable last partial batch support in graph mode.\n if ops.executing_eagerly_outside_functions():\n self.experimental_enable_get_next_as_optional = True\n\n # Flag to turn on VariablePolicy.\n self._use_var_policy = False\n\n def _initialize_strategy(self, devices):\n # The _initialize_strategy method is intended to be used by distribute\n # coordinator as well.\n assert devices, \"Must specify at least one device.\"\n devices = tuple(device_util.resolve(d) for d in devices)\n assert len(set(devices)) == len(devices), (\n \"No duplicates allowed in `devices` argument: %s\" % (devices,))\n if _is_device_list_single_worker(devices):\n self._initialize_single_worker(devices)\n else:\n self._initialize_multi_worker(devices)\n\n def _initialize_single_worker(self, devices):\n \"\"\"Initializes the object for single-worker training.\"\"\"\n self._devices = tuple(device_util.canonicalize(d) for d in devices)\n self._input_workers_devices = (\n (device_util.canonicalize(\"/device:CPU:0\", devices[0]), devices),)\n self._inferred_cross_device_ops = None if self._cross_device_ops else (\n cross_device_ops_lib.choose_the_best(devices))\n self._host_input_device = numpy_dataset.SingleDevice(\n self._input_workers_devices[0][0])\n self._is_multi_worker_training = False\n logging.info(\"Using MirroredStrategy with devices %r\", devices)\n device_spec = tf_device.DeviceSpec.from_string(\n 
self._input_workers_devices[0][0])\n # Ensures when we enter strategy.scope() we use the correct default device\n if device_spec.job is not None and device_spec.job != \"localhost\":\n self._default_device = \"/job:%s/replica:%d/task:%d\" % (\n device_spec.job, device_spec.replica, device_spec.task)\n\n def _initialize_multi_worker(self, devices):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n device_dict = _group_device_list(devices)\n workers = []\n worker_devices = []\n for job in (\"chief\", \"worker\"):\n for task in range(len(device_dict.get(job, []))):\n worker = \"/job:%s/task:%d\" % (job, task)\n workers.append(worker)\n worker_devices.append((worker, device_dict[job][task]))\n\n # Setting `_default_device` will add a device scope in the\n # distribution.scope. We set the default device to the first worker. When\n # users specify device under distribution.scope by\n # with tf.device(\"/cpu:0\"):\n # ...\n # their ops will end up on the cpu device of its first worker, e.g.\n # \"/job:worker/task:0/device:CPU:0\". Note this is not used in replica mode.\n self._default_device = workers[0]\n self._host_input_device = numpy_dataset.SingleDevice(workers[0])\n\n self._devices = tuple(devices)\n self._input_workers_devices = worker_devices\n self._is_multi_worker_training = True\n\n if len(workers) > 1:\n # Grandfather usage in the legacy tests if they're configured properly.\n if (not isinstance(self._cross_device_ops,\n cross_device_ops_lib.ReductionToOneDevice) or\n self._cross_device_ops._num_between_graph_workers > 1): # pylint: disable=protected-access\n raise ValueError(\n \"In-graph multi-worker training with `MirroredStrategy` is not \"\n \"supported.\")\n self._inferred_cross_device_ops = self._cross_device_ops\n else:\n # TODO(yuefengz): make `choose_the_best` work with device strings\n # containing job names.\n self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce()\n\n logging.info(\"Using MirroredStrategy with remote devices %r\", devices)\n\n def _input_workers_with_options(self, options=None):\n if not options or options.experimental_prefetch_to_device:\n return input_lib.InputWorkers(self._input_workers_devices)\n else:\n return input_lib.InputWorkers(\n [(host_device, (host_device,) * len(compute_devices)) for\n host_device, compute_devices in self._input_workers_devices])\n\n @property\n def _input_workers(self):\n return self._input_workers_with_options()\n\n def _get_variable_creator_initial_value(self,\n replica_id,\n device,\n primary_var,\n **kwargs):\n \"\"\"Return the initial value for variables on a replica.\"\"\"\n if replica_id == 0:\n return kwargs[\"initial_value\"]\n else:\n assert primary_var is not None\n assert device is not None\n assert kwargs is not None\n\n def initial_value_fn():\n if context.executing_eagerly() or ops.inside_function():\n init_value = primary_var.value()\n return array_ops.identity(init_value)\n else:\n with ops.device(device):\n init_value = primary_var.initial_value\n return array_ops.identity(init_value)\n\n return initial_value_fn\n\n def _create_variable(self, next_creator, **kwargs):\n \"\"\"Create a mirrored variable. 
See `DistributionStrategy.scope`.\"\"\"\n colocate_with = kwargs.pop(\"colocate_with\", None)\n if colocate_with is None:\n devices = self._devices\n elif isinstance(colocate_with, numpy_dataset.SingleDevice):\n with ops.device(colocate_with.device):\n return next_creator(**kwargs)\n else:\n devices = colocate_with._devices # pylint: disable=protected-access\n\n def _real_mirrored_creator(**kwargs): # pylint: disable=g-missing-docstring\n value_list = []\n for i, d in enumerate(devices):\n with ops.device(d):\n kwargs[\"initial_value\"] = self._get_variable_creator_initial_value(\n replica_id=i,\n device=d,\n primary_var=value_list[0] if value_list else None,\n **kwargs)\n if i > 0:\n # Give replicas meaningful distinct names:\n var0name = value_list[0].name.split(\":\")[0]\n # We append a / to variable names created on replicas with id > 0 to\n # ensure that we ignore the name scope and instead use the given\n # name as the absolute name of the variable.\n kwargs[\"name\"] = \"%s/replica_%d/\" % (var0name, i)\n with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n # Don't record operations (e.g. other variable reads) during\n # variable creation.\n with tape.stop_recording():\n v = next_creator(**kwargs)\n assert not isinstance(v, values.DistributedVariable)\n value_list.append(v)\n return value_list\n\n return distribute_utils.create_mirrored_variable(\n self._container_strategy(), _real_mirrored_creator,\n distribute_utils.VARIABLE_CLASS_MAPPING,\n distribute_utils.VARIABLE_POLICY_MAPPING, **kwargs)\n\n def _validate_colocate_with_variable(self, colocate_with_variable):\n distribute_utils.validate_colocate_distributed_variable(\n colocate_with_variable, self)\n\n def _make_dataset_iterator(self, dataset):\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n input_contexts = []\n num_workers = self._input_workers.num_workers\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n return input_lib.InputFunctionIterator(input_fn, self._input_workers,\n input_contexts,\n self._container_strategy())\n\n def _experimental_distribute_dataset(self, dataset, options):\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers_with_options(options),\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync)\n\n def _experimental_make_numpy_dataset(self, numpy_input, session):\n return numpy_dataset.one_host_numpy_dataset(\n numpy_input, self._host_input_device, session)\n\n def _experimental_distribute_datasets_from_function(self, dataset_fn,\n options):\n input_contexts = []\n input_workers = self._input_workers_with_options(options)\n num_workers = input_workers.num_workers\n for i in range(num_workers):\n input_contexts.append(distribute_lib.InputContext(\n num_input_pipelines=num_workers,\n input_pipeline_id=i,\n num_replicas_in_sync=self._num_replicas_in_sync))\n\n return input_lib.get_distributed_datasets_from_function(\n dataset_fn,\n input_workers,\n input_contexts,\n self._container_strategy())\n\n def _experimental_distribute_values_from_function(self, value_fn):\n per_replica_values = []\n for replica_id in range(self._num_replicas_in_sync):\n per_replica_values.append(value_fn(\n 
distribute_lib.ValueContext(replica_id,\n self._num_replicas_in_sync)))\n return distribute_utils.regroup(per_replica_values, always_wrap=True)\n\n # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.\n def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,\n initial_loop_values=None):\n if initial_loop_values is None:\n initial_loop_values = {}\n initial_loop_values = nest.flatten(initial_loop_values)\n\n ctx = input_lib.MultiStepContext()\n def body(i, *args):\n \"\"\"A wrapper around `fn` to create the while loop body.\"\"\"\n del args\n fn_result = fn(ctx, iterator.get_next())\n for (name, output) in ctx.last_step_outputs.items():\n # Convert all outputs to tensors, potentially from `DistributedValues`.\n ctx.last_step_outputs[name] = self._local_results(output)\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs\n\n # We capture the control_flow_context at this point, before we run `fn`\n # inside a while_loop. This is useful in cases where we might need to exit\n # these contexts and get back to the outer context to do some things, for\n # e.g. create an op which should be evaluated only once at the end of the\n # loop on the host. One such usage is in creating metrics' value op.\n self._outer_control_flow_context = (\n ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access\n\n cond = lambda i, *args: i < iterations\n i = constant_op.constant(0)\n loop_result = control_flow_ops.while_loop(\n cond, body, [i] + initial_loop_values, name=\"\",\n parallel_iterations=1, back_prop=False, swap_memory=False,\n return_same_structure=True)\n del self._outer_control_flow_context\n\n ctx.run_op = control_flow_ops.group(loop_result)\n\n # Convert the last_step_outputs from a list to the original dict structure\n # of last_step_outputs.\n last_step_tensor_outputs = loop_result[1:]\n last_step_tensor_outputs_dict = nest.pack_sequence_as(\n ctx.last_step_outputs, last_step_tensor_outputs)\n\n for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access\n output = last_step_tensor_outputs_dict[name]\n # For outputs that have already been reduced, wrap them in a Mirrored\n # container, else in a PerReplica container.\n if reduce_op is None:\n last_step_tensor_outputs_dict[name] = distribute_utils.regroup(output)\n else:\n assert len(output) == 1\n last_step_tensor_outputs_dict[name] = output[0]\n\n ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access\n return ctx\n\n def _broadcast_to(self, tensor, destinations):\n # This is both a fast path for Python constants, and a way to delay\n # converting Python values to a tensor until we know what type it\n # should be converted to. 
Otherwise we have trouble with:\n # global_step.assign_add(1)\n # since the `1` gets broadcast as an int32 but global_step is int64.\n if isinstance(tensor, (float, int)):\n return tensor\n # TODO(josh11b): In eager mode, use one thread per device, or async mode.\n if not destinations:\n # TODO(josh11b): Use current logical device instead of 0 here.\n destinations = self._devices\n return self._get_cross_device_ops(tensor).broadcast(tensor, destinations)\n\n def _call_for_each_replica(self, fn, args, kwargs):\n return mirrored_run.call_for_each_replica(\n self._container_strategy(), fn, args, kwargs)\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n del task_type, task_id\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n if cluster_spec:\n # TODO(yuefengz): remove the following code once cluster_resolver is\n # added.\n num_gpus_per_worker = _infer_num_gpus_per_worker(self._devices)\n multi_worker_devices = _cluster_spec_to_device_list(\n cluster_spec, num_gpus_per_worker)\n self._initialize_multi_worker(multi_worker_devices)\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n updated_config.isolate_session_state = True\n return updated_config\n\n def _get_cross_device_ops(self, value):\n del value # Unused.\n return self._cross_device_ops or self._inferred_cross_device_ops\n\n def _reduce_to(self, reduce_op, value, destinations, experimental_hints):\n if (distribute_utils.is_mirrored(value) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not distribute_utils.is_mirrored(value)\n if not isinstance(value, values.DistributedValues):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. 
For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, value, destinations, self._num_replicas_in_sync)\n return self._get_cross_device_ops(value).reduce(\n reduce_op,\n value,\n destinations=destinations,\n experimental_hints=experimental_hints)\n\n def _batch_reduce_to(self, reduce_op, value_destination_pairs,\n experimental_hints):\n cross_device_ops = None\n for value, _ in value_destination_pairs:\n if cross_device_ops is None:\n cross_device_ops = self._get_cross_device_ops(value)\n elif cross_device_ops is not self._get_cross_device_ops(value):\n raise ValueError(\"inputs to batch_reduce_to must be either all on the \"\n \"the host or all on the compute devices\")\n return cross_device_ops.batch_reduce(reduce_op, value_destination_pairs,\n experimental_hints)\n\n def _update(self, var, fn, args, kwargs, group):\n # TODO(josh11b): In eager mode, use one thread per device.\n assert isinstance(var, values.DistributedVariable)\n updates = []\n for i, v in enumerate(var.values):\n name = \"update_%d\" % i\n with ops.device(v.device), \\\n distribute_lib.UpdateContext(i), \\\n ops.name_scope(name):\n # If args and kwargs are not mirrored, the value is returned as is.\n updates.append(\n fn(v, *distribute_utils.select_replica_mirrored(i, args),\n **distribute_utils.select_replica_mirrored(i, kwargs)))\n return distribute_utils.update_regroup(self, updates, group)\n\n def _update_non_slot(self, colocate_with, fn, args, kwargs, group):\n assert isinstance(colocate_with, tuple)\n # TODO(josh11b): In eager mode, use one thread per device.\n updates = []\n for i, d in enumerate(colocate_with):\n name = \"update_%d\" % i\n with ops.device(d), distribute_lib.UpdateContext(i), ops.name_scope(name):\n updates.append(\n fn(*distribute_utils.select_replica_mirrored(i, args),\n **distribute_utils.select_replica_mirrored(i, kwargs)))\n return distribute_utils.update_regroup(self, updates, group)\n\n def read_var(self, replica_local_var):\n \"\"\"Read the aggregate value of a replica-local variable.\"\"\"\n # pylint: disable=protected-access\n if distribute_utils.is_sync_on_read(replica_local_var):\n return replica_local_var._get_cross_replica()\n assert distribute_utils.is_mirrored(replica_local_var)\n return array_ops.identity(replica_local_var._get())\n # pylint: enable=protected-access\n\n def _local_results(self, val):\n if isinstance(val, values.DistributedValues):\n return val._values # pylint: disable=protected-access\n return (val,)\n\n def value_container(self, val):\n return distribute_utils.value_container(val)\n\n @property\n def _num_replicas_in_sync(self):\n return len(self._devices)\n\n @property\n def worker_devices(self):\n return self._devices\n\n @property\n def worker_devices_by_replica(self):\n return [[d] for d in self._devices]\n\n @property\n def parameter_devices(self):\n return self.worker_devices\n\n @property\n def experimental_between_graph(self):\n return False\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return True\n\n @property\n def should_save_summary(self):\n return True\n\n def non_slot_devices(self, var_list):\n del var_list\n # TODO(josh11b): Should this be the last logical device instead?\n return self._devices\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n 
\"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n\n def _in_multi_worker_mode(self):\n \"\"\"Whether this strategy indicates working in multi-worker settings.\"\"\"\n return False\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utils for make_zip tests.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport itertools\nimport operator\nimport os\nimport re\nimport string\nimport traceback\nimport zipfile\n\nimport numpy as np\nfrom six import StringIO\n\n# pylint: disable=g-import-not-at-top\nimport tensorflow.compat.v1 as tf\nfrom google.protobuf import text_format\nfrom tensorflow.lite.testing import _pywrap_string_util\nfrom tensorflow.lite.testing import generate_examples_report as report_lib\nfrom tensorflow.python.framework import graph_util as tf_graph_util\n\n# A map from names to functions which make test cases.\n_MAKE_TEST_FUNCTIONS_MAP = {}\n\n\n# A decorator to register the make test functions.\n# Usage:\n# All the make_*_test should be registered. Example:\n# @register_make_test_function()\n# def make_conv_tests(options):\n# # ...\n# If a function is decorated by other decorators, it's required to specify the\n# name explicitly. 
Example:\n# @register_make_test_function(name=\"make_unidirectional_sequence_lstm_tests\")\n# @test_util.enable_control_flow_v2\n# def make_unidirectional_sequence_lstm_tests(options):\n# # ...\ndef register_make_test_function(name=None):\n\n def decorate(function, name=name):\n if name is None:\n name = function.__name__\n _MAKE_TEST_FUNCTIONS_MAP[name] = function\n\n return decorate\n\n\ndef get_test_function(test_function_name):\n \"\"\"Get the test function according to the test function name.\"\"\"\n\n if test_function_name not in _MAKE_TEST_FUNCTIONS_MAP:\n return None\n return _MAKE_TEST_FUNCTIONS_MAP[test_function_name]\n\n\nRANDOM_SEED = 342\n\nTF_TYPE_INFO = {\n tf.float32: (np.float32, \"FLOAT\"),\n tf.float16: (np.float16, \"FLOAT\"),\n tf.float64: (np.double, \"FLOAT64\"),\n tf.int32: (np.int32, \"INT32\"),\n tf.uint8: (np.uint8, \"QUANTIZED_UINT8\"),\n tf.int16: (np.int16, \"QUANTIZED_INT16\"),\n tf.int64: (np.int64, \"INT64\"),\n tf.bool: (np.bool, \"BOOL\"),\n tf.string: (np.string_, \"STRING\"),\n}\n\n\nclass ExtraTocoOptions(object):\n \"\"\"Additional toco options besides input, output, shape.\"\"\"\n\n def __init__(self):\n # Whether to ignore control dependency nodes.\n self.drop_control_dependency = False\n # Allow custom ops in the toco conversion.\n self.allow_custom_ops = False\n # Rnn states that are used to support rnn / lstm cells.\n self.rnn_states = None\n # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.\n self.split_tflite_lstm_inputs = None\n # The inference input type passed to TFLiteConvert.\n self.inference_input_type = None\n # The inference output type passed to TFLiteConvert.\n self.inference_output_type = None\n\n\ndef create_tensor_data(dtype, shape, min_value=-100, max_value=100):\n \"\"\"Build tensor data spreading the range [min_value, max_value).\"\"\"\n\n if dtype in TF_TYPE_INFO:\n dtype = TF_TYPE_INFO[dtype][0]\n\n if dtype in (tf.float32, tf.float16, tf.float64):\n value = (max_value - min_value) * np.random.random_sample(shape) + min_value\n elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):\n value = np.random.randint(min_value, max_value + 1, shape)\n elif dtype == tf.bool:\n value = np.random.choice([True, False], size=shape)\n elif dtype == np.string_:\n # Not the best strings, but they will do for some basic testing.\n letters = list(string.ascii_uppercase)\n return np.random.choice(letters, size=shape).astype(dtype)\n return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(\n dtype)\n\n\ndef create_scalar_data(dtype, min_value=-100, max_value=100):\n \"\"\"Build scalar tensor data range from min_value to max_value exclusively.\"\"\"\n\n if dtype in TF_TYPE_INFO:\n dtype = TF_TYPE_INFO[dtype][0]\n\n if dtype in (tf.float32, tf.float16, tf.float64):\n value = (max_value - min_value) * np.random.random() + min_value\n elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):\n value = np.random.randint(min_value, max_value + 1)\n elif dtype == tf.bool:\n value = np.random.choice([True, False])\n elif dtype == np.string_:\n l = np.random.randint(1, 6)\n value = \"\".join(np.random.choice(list(string.ascii_uppercase), size=l))\n return np.array(value, dtype=dtype)\n\n\ndef freeze_graph(session, outputs):\n \"\"\"Freeze the current graph.\n\n Args:\n session: Tensorflow sessions containing the graph\n outputs: List of output tensors\n\n Returns:\n The frozen graph_def.\n \"\"\"\n return tf_graph_util.convert_variables_to_constants(\n session, session.graph.as_graph_def(), [x.op.name for x in 
outputs])\n\n\ndef format_result(t):\n \"\"\"Convert a tensor to a format that can be used in test specs.\"\"\"\n if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:\n # Output 9 digits after the point to ensure the precision is good enough.\n values = [\"{:.9f}\".format(value) for value in list(t.flatten())]\n return \",\".join(values)\n else:\n # SerializeAsHexString returns bytes in PY3, so decode if appropriate.\n return _pywrap_string_util.SerializeAsHexString(t.flatten()).decode(\"utf-8\")\n\n\ndef write_examples(fp, examples):\n \"\"\"Given a list `examples`, write a text format representation.\n\n The file format is csv like with a simple repeated pattern. We would ike\n to use proto here, but we can't yet due to interfacing with the Android\n team using this format.\n\n Args:\n fp: File-like object to write to.\n examples: Example dictionary consisting of keys \"inputs\" and \"outputs\"\n \"\"\"\n\n def write_tensor(fp, x):\n \"\"\"Write tensor in file format supported by TFLITE example.\"\"\"\n fp.write(\"dtype,%s\\n\" % x.dtype)\n fp.write(\"shape,\" + \",\".join(map(str, x.shape)) + \"\\n\")\n fp.write(\"values,\" + format_result(x) + \"\\n\")\n\n fp.write(\"test_cases,%d\\n\" % len(examples))\n for example in examples:\n fp.write(\"inputs,%d\\n\" % len(example[\"inputs\"]))\n for i in example[\"inputs\"]:\n write_tensor(fp, i)\n fp.write(\"outputs,%d\\n\" % len(example[\"outputs\"]))\n for i in example[\"outputs\"]:\n write_tensor(fp, i)\n\n\ndef write_test_cases(fp, model_name, examples):\n \"\"\"Given a dictionary of `examples`, write a text format representation.\n\n The file format is protocol-buffer-like, even though we don't use proto due\n to the needs of the Android team.\n\n Args:\n fp: File-like object to write to.\n model_name: Filename where the model was written to, relative to filename.\n examples: Example dictionary consisting of keys \"inputs\" and \"outputs\"\n \"\"\"\n\n fp.write(\"load_model: %s\\n\" % os.path.basename(model_name))\n for example in examples:\n fp.write(\"reshape {\\n\")\n for t in example[\"inputs\"]:\n fp.write(\" input: \\\"\" + \",\".join(map(str, t.shape)) + \"\\\"\\n\")\n fp.write(\"}\\n\")\n fp.write(\"invoke {\\n\")\n\n for t in example[\"inputs\"]:\n fp.write(\" input: \\\"\" + format_result(t) + \"\\\"\\n\")\n for t in example[\"outputs\"]:\n fp.write(\" output: \\\"\" + format_result(t) + \"\\\"\\n\")\n fp.write(\" output_shape: \\\"\" + \",\".join([str(dim) for dim in t.shape]) +\n \"\\\"\\n\")\n fp.write(\"}\\n\")\n\n\ndef get_input_shapes_map(input_tensors):\n \"\"\"Gets a map of input names to shapes.\n\n Args:\n input_tensors: List of input tensor tuples `(name, shape, type)`.\n\n Returns:\n {string : list of integers}.\n \"\"\"\n input_arrays = [tensor[0] for tensor in input_tensors]\n input_shapes_list = []\n\n for _, shape, _ in input_tensors:\n dims = None\n if shape:\n dims = [dim.value for dim in shape.dims]\n input_shapes_list.append(dims)\n\n input_shapes = {\n name: shape\n for name, shape in zip(input_arrays, input_shapes_list)\n if shape\n }\n return input_shapes\n\n\ndef _normalize_output_name(output_name):\n \"\"\"Remove :0 suffix from tensor names.\"\"\"\n return output_name.split(\":\")[0] if output_name.endswith(\n \":0\") else output_name\n\n\n# How many test cases we may have in a zip file. 
Too many test cases will\n# slow down the test data generation process.\n_MAX_TESTS_PER_ZIP = 500\n\n\ndef make_zip_of_tests(options,\n test_parameters,\n make_graph,\n make_test_inputs,\n extra_toco_options=ExtraTocoOptions(),\n use_frozen_graph=False,\n expected_tf_failures=0):\n \"\"\"Helper to make a zip file of a bunch of TensorFlow models.\n\n This does a cartesian product of the dictionary of test_parameters and\n calls make_graph() for each item in the cartesian product set.\n If the graph is built successfully, then make_test_inputs() is called to\n build expected input/output value pairs. The model is then converted to tflite\n with toco, and the examples are serialized with the tflite model into a zip\n file (2 files per item in the cartesian product set).\n\n Args:\n options: An Options instance.\n test_parameters: Dictionary mapping to lists for each parameter.\n e.g. `{\"strides\": [[1,3,3,1], [1,2,2,1]], \"foo\": [1.2, 1.3]}`\n make_graph: function that takes current parameters and returns tuple\n `[input1, input2, ...], [output1, output2, ...]`\n make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,\n `output_tensors` and returns tuple `(input_values, output_values)`.\n extra_toco_options: Additional toco options.\n use_frozen_graph: Whether or not freeze graph before toco converter.\n expected_tf_failures: Number of times tensorflow is expected to fail in\n executing the input graphs. In some cases it is OK for TensorFlow to fail\n because the one or more combination of parameters is invalid.\n\n Raises:\n RuntimeError: if there are converter errors that can't be ignored.\n \"\"\"\n zip_path = os.path.join(options.output_path, options.zip_to_output)\n parameter_count = 0\n for parameters in test_parameters:\n parameter_count += functools.reduce(\n operator.mul, [len(values) for values in parameters.values()])\n\n all_parameter_count = parameter_count\n if options.multi_gen_state:\n all_parameter_count += options.multi_gen_state.parameter_count\n if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:\n raise RuntimeError(\n \"Too many parameter combinations for generating '%s'.\\n\"\n \"There are at least %d combinations while the upper limit is %d.\\n\"\n \"Having too many combinations will slow down the tests.\\n\"\n \"Please consider splitting the test into multiple functions.\\n\" %\n (zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))\n if options.multi_gen_state:\n options.multi_gen_state.parameter_count = all_parameter_count\n\n # TODO(aselle): Make this allow multiple inputs outputs.\n if options.multi_gen_state:\n archive = options.multi_gen_state.archive\n else:\n archive = zipfile.PyZipFile(zip_path, \"w\")\n zip_manifest = []\n convert_report = []\n toco_errors = 0\n\n processed_labels = set()\n\n if options.make_edgetpu_tests:\n extra_toco_options.inference_input_type = tf.uint8\n extra_toco_options.inference_output_type = tf.uint8\n # Only count parameters when fully_quantize is True.\n parameter_count = 0\n for parameters in test_parameters:\n if True in parameters.get(\"fully_quantize\",\n []) and False in parameters.get(\n \"quant_16x8\", [False]):\n parameter_count += functools.reduce(operator.mul, [\n len(values)\n for key, values in parameters.items()\n if key != \"fully_quantize\" and key != \"quant_16x8\"\n ])\n\n label_base_path = zip_path\n if options.multi_gen_state:\n label_base_path = options.multi_gen_state.label_base_path\n\n for parameters in test_parameters:\n keys = parameters.keys()\n for curr in 
itertools.product(*parameters.values()):\n label = label_base_path.replace(\".zip\", \"_\") + (\",\".join(\n \"%s=%r\" % z for z in sorted(zip(keys, curr))).replace(\" \", \"\"))\n if label[0] == \"/\":\n label = label[1:]\n if label in processed_labels:\n # Do not populate data for the same label more than once. It will cause\n # errors when unzipping.\n continue\n processed_labels.add(label)\n\n param_dict = dict(zip(keys, curr))\n\n if options.make_edgetpu_tests and (not param_dict.get(\n \"fully_quantize\", False) or param_dict.get(\"quant_16x8\", False)):\n continue\n\n def generate_inputs_outputs(tflite_model_binary,\n min_value=0,\n max_value=255):\n \"\"\"Generate input values and output values of the given tflite model.\n\n Args:\n tflite_model_binary: A serialized flatbuffer as a string.\n min_value: min value for the input tensor.\n max_value: max value for the input tensor.\n\n Returns:\n (input_values, output_values): input values and output values built.\n \"\"\"\n interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n input_values = []\n for input_detail in input_details:\n input_value = create_tensor_data(\n input_detail[\"dtype\"],\n input_detail[\"shape\"],\n min_value=min_value,\n max_value=max_value)\n interpreter.set_tensor(input_detail[\"index\"], input_value)\n input_values.append(input_value)\n\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n output_values = []\n for output_detail in output_details:\n output_values.append(interpreter.get_tensor(output_detail[\"index\"]))\n\n return input_values, output_values\n\n def build_example(label, param_dict_real):\n \"\"\"Build the model with parameter values set in param_dict_real.\n\n Args:\n label: Label of the model (i.e. 
the filename in the zip).\n param_dict_real: Parameter dictionary (arguments to the factories\n make_graph and make_test_inputs)\n\n Returns:\n (tflite_model_binary, report) where tflite_model_binary is the\n serialized flatbuffer as a string and report is a dictionary with\n keys `toco_log` (log of toco conversion), `tf_log` (log of tf\n conversion), `toco` (a string of success status of the conversion),\n `tf` (a string success status of the conversion).\n \"\"\"\n\n np.random.seed(RANDOM_SEED)\n report = {\"toco\": report_lib.NOTRUN, \"tf\": report_lib.FAILED}\n\n # Build graph\n report[\"tf_log\"] = \"\"\n report[\"toco_log\"] = \"\"\n tf.reset_default_graph()\n\n with tf.Graph().as_default():\n with tf.device(\"/cpu:0\"):\n try:\n inputs, outputs = make_graph(param_dict_real)\n except (tf.errors.UnimplementedError,\n tf.errors.InvalidArgumentError, ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n\n sess = tf.Session()\n try:\n baseline_inputs, baseline_outputs = (\n make_test_inputs(param_dict_real, sess, inputs, outputs))\n except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,\n ValueError):\n report[\"tf_log\"] += traceback.format_exc()\n return None, report\n report[\"toco\"] = report_lib.FAILED\n report[\"tf\"] = report_lib.SUCCESS\n # Convert graph to toco\n input_tensors = [(input_tensor.name.split(\":\")[0], input_tensor.shape,\n input_tensor.dtype) for input_tensor in inputs]\n output_tensors = [_normalize_output_name(out.name) for out in outputs]\n # pylint: disable=g-long-ternary\n graph_def = freeze_graph(\n sess,\n tf.global_variables() + inputs +\n outputs) if use_frozen_graph else sess.graph_def\n\n if \"split_tflite_lstm_inputs\" in param_dict_real:\n extra_toco_options.split_tflite_lstm_inputs = param_dict_real[\n \"split_tflite_lstm_inputs\"]\n tflite_model_binary, toco_log = options.tflite_convert_function(\n options,\n graph_def,\n input_tensors,\n output_tensors,\n extra_toco_options=extra_toco_options,\n test_params=param_dict_real)\n report[\"toco\"] = (\n report_lib.SUCCESS\n if tflite_model_binary is not None else report_lib.FAILED)\n report[\"toco_log\"] = toco_log\n\n if options.save_graphdefs:\n archive.writestr(label + \".pbtxt\",\n text_format.MessageToString(graph_def),\n zipfile.ZIP_DEFLATED)\n\n if tflite_model_binary:\n if options.make_edgetpu_tests:\n # Set proper min max values according to input dtype.\n baseline_inputs, baseline_outputs = generate_inputs_outputs(\n tflite_model_binary, min_value=0, max_value=255)\n archive.writestr(label + \".bin\", tflite_model_binary,\n zipfile.ZIP_DEFLATED)\n example = {\"inputs\": baseline_inputs, \"outputs\": baseline_outputs}\n\n example_fp = StringIO()\n write_examples(example_fp, [example])\n archive.writestr(label + \".inputs\", example_fp.getvalue(),\n zipfile.ZIP_DEFLATED)\n\n example_fp2 = StringIO()\n write_test_cases(example_fp2, label + \".bin\", [example])\n archive.writestr(label + \"_tests.txt\", example_fp2.getvalue(),\n zipfile.ZIP_DEFLATED)\n\n zip_manifest.append(label + \"\\n\")\n\n return tflite_model_binary, report\n\n _, report = build_example(label, param_dict)\n\n if report[\"toco\"] == report_lib.FAILED:\n ignore_error = False\n if not options.known_bugs_are_errors:\n for pattern, bug_number in options.known_bugs.items():\n if re.search(pattern, label):\n print(\"Ignored converter error due to bug %s\" % bug_number)\n ignore_error = True\n if not ignore_error:\n toco_errors += 1\n print(\"-----------------\\nconverter 
error!\\n%s\\n-----------------\\n\" %\n report[\"toco_log\"])\n\n convert_report.append((param_dict, report))\n\n if not options.no_conversion_report:\n report_io = StringIO()\n report_lib.make_report_table(report_io, zip_path, convert_report)\n if options.multi_gen_state:\n archive.writestr(\"report_\" + options.multi_gen_state.test_name + \".html\",\n report_io.getvalue())\n else:\n archive.writestr(\"report.html\", report_io.getvalue())\n\n if options.multi_gen_state:\n options.multi_gen_state.zip_manifest.extend(zip_manifest)\n else:\n archive.writestr(\"manifest.txt\", \"\".join(zip_manifest),\n zipfile.ZIP_DEFLATED)\n\n # Log statistics of what succeeded\n total_conversions = len(convert_report)\n tf_success = sum(\n 1 for x in convert_report if x[1][\"tf\"] == report_lib.SUCCESS)\n toco_success = sum(\n 1 for x in convert_report if x[1][\"toco\"] == report_lib.SUCCESS)\n percent = 0\n if tf_success > 0:\n percent = float(toco_success) / float(tf_success) * 100.\n tf.logging.info((\"Archive %s Considered %d graphs, %d TF evaluated graphs \"\n \" and %d TOCO converted graphs (%.1f%%\"), zip_path,\n total_conversions, tf_success, toco_success, percent)\n\n tf_failures = parameter_count - tf_success\n\n if tf_failures / parameter_count > 0.8:\n raise RuntimeError((\"Test for '%s' is not very useful. \"\n \"TensorFlow fails in %d percent of the cases.\") %\n (zip_path, int(100 * tf_failures / parameter_count)))\n\n if not options.make_edgetpu_tests and tf_failures != expected_tf_failures:\n raise RuntimeError((\"Expected TF to fail %d times while generating '%s', \"\n \"but that happened %d times\") %\n (expected_tf_failures, zip_path, tf_failures))\n\n if not options.ignore_converter_errors and toco_errors > 0:\n raise RuntimeError(\"Found %d errors while generating toco models\" %\n toco_errors)\n", "# Copyright 2019 The TensorFlow Authors. 
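# --- Illustrative sketch (not part of the original sources above) ---
# `generate_inputs_outputs` above drives a converted flatbuffer through
# `tf.lite.Interpreter` to record expected input/output pairs. A minimal
# standalone version of that round trip might look like the following;
# the "model.bin" file name and the [0, 255) value range are assumptions
# made only for illustration.
import numpy as np
import tensorflow as tf


def run_tflite_model(tflite_model_binary, min_value=0, max_value=255):
  """Feed random data to a serialized TFLite model and return its outputs."""
  interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
  interpreter.allocate_tensors()

  input_values = []
  for detail in interpreter.get_input_details():
    # Random data in [min_value, max_value), cast to the input tensor's dtype.
    data = np.random.uniform(
        min_value, max_value, size=detail["shape"]).astype(detail["dtype"])
    interpreter.set_tensor(detail["index"], data)
    input_values.append(data)

  interpreter.invoke()

  output_values = [
      interpreter.get_tensor(d["index"])
      for d in interpreter.get_output_details()
  ]
  return input_values, output_values


# Hypothetical usage with a flatbuffer produced by make_zip_of_tests:
# with open("model.bin", "rb") as f:
#   inputs, outputs = run_tflite_model(f.read())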
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for saving and loading using keras save/load APIs with DS.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.distribute import saved_model_test_base as test_base\nfrom tensorflow.python.keras.saving import save\nfrom tensorflow.python.platform import test\n\n\n@testing_utils.run_all_without_tensor_float_32(\n 'Uses Dense layers, which call matmul')\nclass KerasSaveLoadTest(test_base.TestSavedModelBase):\n\n def setUp(self):\n self._root_dir = 'keras_save_load'\n super(KerasSaveLoadTest, self).setUp()\n\n def _save_model(self, model, saved_dir):\n model.save(saved_dir, save_format='tf')\n\n def _load_and_run_model(self,\n distribution,\n saved_dir,\n predict_dataset,\n output_name='output_1'):\n restored_keras_model = save.load_model(saved_dir)\n return restored_keras_model.predict(\n predict_dataset, steps=test_base.PREDICT_STEPS)\n\n @combinations.generate(test_base.simple_models_with_strategies())\n def test_save_no_strategy_restore_strategy(self, model_and_input,\n distribution):\n self.run_test_save_no_strategy_restore_strategy(\n model_and_input, distribution)\n\n @combinations.generate(\n combinations.times(test_base.simple_models_with_strategies(),\n combinations.combine(save_in_scope=[True, False])))\n def test_save_strategy_restore_no_strategy(self, model_and_input,\n distribution, save_in_scope):\n self.run_test_save_strategy_restore_no_strategy(\n model_and_input, distribution, save_in_scope)\n\n @combinations.generate(\n combinations.times(test_base.simple_models_with_strategy_pairs(),\n combinations.combine(save_in_scope=[True, False])))\n def test_save_strategy_restore_strategy(self, model_and_input,\n distribution_for_saving,\n distribution_for_restoring,\n save_in_scope):\n self.run_test_save_strategy_restore_strategy(model_and_input,\n distribution_for_saving,\n distribution_for_restoring,\n save_in_scope)\n\n\nif __name__ == '__main__':\n ops.enable_eager_execution()\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
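# --- Illustrative sketch (not part of the original sources above) ---
# KerasSaveLoadTest above saves a model in the TensorFlow SavedModel format
# and reloads it for prediction. A minimal public-API version of that round
# trip is sketched below; the toy architecture, the "/tmp/keras_save_load"
# path and the random inputs are assumptions made only for illustration.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="sgd", loss="mse")

saved_dir = "/tmp/keras_save_load"                 # assumed scratch location
model.save(saved_dir, save_format="tf")            # mirrors _save_model above
restored = tf.keras.models.load_model(saved_dir)   # mirrors _load_and_run_model

x = np.random.random((2, 8)).astype("float32")
np.testing.assert_allclose(model.predict(x), restored.predict(x), atol=1e-6)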
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras metrics functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras import metrics\nfrom tensorflow.python.platform import test\n\n\nclass KerasFunctionalMetricsTest(test.TestCase, parameterized.TestCase):\n\n def test_metrics(self):\n with self.cached_session():\n y_a = K.variable(np.random.random((6, 7)))\n y_b = K.variable(np.random.random((6, 7)))\n for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]:\n output = metric(y_a, y_b)\n self.assertEqual(K.eval(output).shape, (6,))\n\n def test_sparse_categorical_accuracy_int(self):\n with self.cached_session():\n metric = metrics.sparse_categorical_accuracy\n y_true = K.variable(np.random.randint(0, 7, (6,)))\n y_pred = K.variable(np.random.random((6, 7)))\n self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))\n\n # Test correctness if the shape of y_true is (num_samples,)\n y_true = K.variable([1., 0., 0., 0.])\n y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])\n self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])\n\n # Test correctness if the shape of y_true is (num_samples, 1)\n y_true = K.variable([[1.], [0.], [0.], [0.]])\n y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])\n self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])\n\n # Test correctness if the shape of y_true is (batch_size, seq_length) and\n # y_pred is (batch_size, seq_length, num_classes)\n y_pred = K.variable(\n np.array([[[0.2, 0.3, 0.1], [0.1, 0.2, 0.7]],\n [[0.3, 0.2, 0.1], [0.7, 0.2, 0.1]]]))\n y_true = K.variable(np.array([[1, 0], [1, 0]]))\n self.assertAllEqual(K.eval(metric(y_true, y_pred)), [[1., 0.], [0., 1.]])\n\n def test_sparse_categorical_accuracy_float(self):\n with self.cached_session():\n metric = metrics.sparse_categorical_accuracy\n y_true = K.variable(np.random.random((6,)))\n y_pred = K.variable(np.random.random((6, 7)))\n self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))\n\n @combinations.generate(combinations.combine(mode=['eager']))\n def test_sparse_categorical_accuracy_eager(self):\n \"\"\"Tests that ints passed in via Eager return results. See b/113504761.\"\"\"\n metric = metrics.sparse_categorical_accuracy\n y_true = np.arange(6).reshape([6, 1])\n y_pred = np.arange(36).reshape([6, 6])\n self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])\n\n @combinations.generate(combinations.combine(mode=['eager']))\n def test_sparse_categorical_accuracy_float_eager(self):\n \"\"\"Tests that floats passed in via Eager return results. 
See b/113504761.\"\"\"\n metric = metrics.sparse_categorical_accuracy\n y_true = np.arange(6, dtype=np.float32).reshape([6, 1])\n y_pred = np.arange(36).reshape([6, 6])\n self.assertAllEqual(metric(y_true, y_pred), [0., 0., 0., 0., 0., 1.])\n\n def test_sparse_top_k_categorical_accuracy(self):\n with self.cached_session():\n # Test correctness if the shape of y_true is (num_samples, 1)\n y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))\n y_true = K.variable(np.array([[1], [0]]))\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))\n self.assertEqual(np.mean(result), 1)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))\n self.assertEqual(np.mean(result), 0.5)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))\n self.assertEqual(np.mean(result), 0.)\n\n # Test correctness if the shape of y_true is (num_samples,)\n y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))\n y_true = K.variable(np.array([1, 0]))\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))\n self.assertEqual(np.mean(result), 1)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))\n self.assertEqual(np.mean(result), 0.5)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))\n self.assertEqual(np.mean(result), 0.)\n\n # Test correctness if the shape of y_true is (batch_size, seq_length) and\n # y_pred is (batch_size, seq_length, num_classes)\n y_pred = K.variable(\n np.array([[[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]],\n [[0.3, 0.2, 0.1], [0.1, 0.2, 0.7], [0.3, 0.2, 0.1]]]))\n y_true = K.variable(np.array([[1, 0, 0], [1, 0, 1]]))\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))\n self.assertEqual(np.mean(result), 1)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))\n self.assertEqual(np.mean(result), 0.5)\n result = K.eval(\n metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))\n self.assertEqual(np.mean(result), 0.)\n\n def test_top_k_categorical_accuracy(self):\n with self.cached_session():\n y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))\n y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))\n result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))\n self.assertEqual(np.mean(result), 1)\n result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))\n self.assertEqual(np.mean(result), 0.5)\n result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))\n self.assertEqual(np.mean(result), 0.)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
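# --- Illustrative sketch (not part of the original sources above) ---
# The tests above exercise the functional Keras metrics on small arrays. The
# same behaviour is reproducible eagerly through the public tf.keras.metrics
# functions; the arrays below reuse values from the tests, and the expected
# outputs in the comments follow from the assertions in those tests.
import numpy as np
import tensorflow as tf

y_true = np.array([[1], [0]])                                   # shape (2, 1)
y_pred = np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]], dtype=np.float32)

# Plain accuracy: neither row's argmax matches its label, so both entries are 0.
acc = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
print(acc.numpy())   # [0. 0.]

# Top-2 accuracy: the label is inside the two highest scores for the first
# row only, matching the mean of 0.5 asserted in the test above.
top2 = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2)
print(top2.numpy())  # [1. 0.]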
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Base class for linear operators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport contextlib\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as linalg\nfrom tensorflow.python.ops.linalg import linear_operator_algebra\nfrom tensorflow.python.ops.linalg import linear_operator_util\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperator\"]\n\n\n# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.\n@tf_export(\"linalg.LinearOperator\")\[email protected]_metaclass(abc.ABCMeta)\nclass LinearOperator(module.Module):\n \"\"\"Base class defining a [batch of] linear operator[s].\n\n Subclasses of `LinearOperator` provide access to common methods on a\n (batch) matrix, without the need to materialize the matrix. This allows:\n\n * Matrix free computations\n * Operators that take advantage of special structure, while providing a\n consistent API to users.\n\n #### Subclassing\n\n To enable a public method, subclasses should implement the leading-underscore\n version of the method. The argument signature should be identical except for\n the omission of `name=\"...\"`. For example, to enable\n `matmul(x, adjoint=False, name=\"matmul\")` a subclass should implement\n `_matmul(x, adjoint=False)`.\n\n #### Performance contract\n\n Subclasses should only implement the assert methods\n (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`\n time.\n\n Class docstrings should contain an explanation of computational complexity.\n Since this is a high-performance library, attention should be paid to detail,\n and explanations can include constants as well as Big-O notation.\n\n #### Shape compatibility\n\n `LinearOperator` subclasses should operate on a [batch] matrix with\n compatible shape. Class docstrings should define what is meant by compatible\n shape. 
Some subclasses may not support batching.\n\n Examples:\n\n `x` is a batch matrix with compatible shape for `matmul` if\n\n ```\n operator.shape = [B1,...,Bb] + [M, N], b >= 0,\n x.shape = [B1,...,Bb] + [N, R]\n ```\n\n `rhs` is a batch matrix with compatible shape for `solve` if\n\n ```\n operator.shape = [B1,...,Bb] + [M, N], b >= 0,\n rhs.shape = [B1,...,Bb] + [M, R]\n ```\n\n #### Example docstring for subclasses.\n\n This operator acts like a (batch) matrix `A` with shape\n `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `m x n` matrix. Again, this matrix `A` may not be materialized, but for\n purposes of identifying and working with compatible arguments the shape is\n relevant.\n\n Examples:\n\n ```python\n some_tensor = ... shape = ????\n operator = MyLinOp(some_tensor)\n\n operator.shape()\n ==> [2, 4, 4]\n\n operator.log_abs_determinant()\n ==> Shape [2] Tensor\n\n x = ... Shape [2, 4, 5] Tensor\n\n operator.matmul(x)\n ==> Shape [2, 4, 5] Tensor\n ```\n\n #### Shape compatibility\n\n This operator acts on batch matrices with compatible shape.\n FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE\n\n #### Performance\n\n FILL THIS IN\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n\n #### Initialization parameters\n\n All subclasses of `LinearOperator` are expected to pass a `parameters`\n argument to `super().__init__()`. This should be a `dict` containing\n the unadulterated arguments passed to the subclass `__init__`. For example,\n `MyLinearOperator` with an initializer should look like:\n\n ```python\n def __init__(self, operator, is_square=False, name=None):\n parameters = dict(\n operator=operator,\n is_square=is_square,\n name=name\n )\n ...\n super().__init__(..., parameters=parameters)\n ```\n\n Users can then access `my_linear_operator.parameters` to see all arguments\n passed to its initializer.\n \"\"\"\n\n # TODO(b/143910018) Remove graph_parents in V3.\n @deprecation.deprecated_args(None, \"Do not pass `graph_parents`. They will \"\n \" no longer be used.\", \"graph_parents\")\n def __init__(self,\n dtype,\n graph_parents=None,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None,\n parameters=None):\n r\"\"\"Initialize the `LinearOperator`.\n\n **This is a private method for subclass use.**\n **Subclasses should copy-paste this `__init__` documentation.**\n\n Args:\n dtype: The type of the this `LinearOperator`. Arguments to `matmul` and\n `solve` will have to be this type.\n graph_parents: (Deprecated) Python list of graph prerequisites of this\n `LinearOperator` Typically tensors that are passed during initialization\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. 
If `dtype` is real, this is equivalent to being symmetric.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`.\n parameters: Python `dict` of parameters used to instantiate this\n `LinearOperator`.\n\n Raises:\n ValueError: If any member of graph_parents is `None` or not a `Tensor`.\n ValueError: If hints are set incorrectly.\n \"\"\"\n # Check and auto-set flags.\n if is_positive_definite:\n if is_non_singular is False:\n raise ValueError(\"A positive definite matrix is always non-singular.\")\n is_non_singular = True\n\n if is_non_singular:\n if is_square is False:\n raise ValueError(\"A non-singular matrix is always square.\")\n is_square = True\n\n if is_self_adjoint:\n if is_square is False:\n raise ValueError(\"A self-adjoint matrix is always square.\")\n is_square = True\n\n self._is_square_set_or_implied_by_hints = is_square\n\n if graph_parents is not None:\n self._set_graph_parents(graph_parents)\n else:\n self._graph_parents = []\n self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype\n self._is_non_singular = is_non_singular\n self._is_self_adjoint = is_self_adjoint\n self._is_positive_definite = is_positive_definite\n self._parameters = self._no_dependency(parameters)\n self._parameters_sanitized = False\n self._name = name or type(self).__name__\n\n @contextlib.contextmanager\n def _name_scope(self, name=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n full_name = self.name\n if name is not None:\n full_name += \"/\" + name\n with ops.name_scope(full_name) as scope:\n yield scope\n\n @property\n def parameters(self):\n \"\"\"Dictionary of parameters used to instantiate this `LinearOperator`.\"\"\"\n return dict(self._parameters)\n\n @property\n def dtype(self):\n \"\"\"The `DType` of `Tensor`s handled by this `LinearOperator`.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this `LinearOperator`.\"\"\"\n return self._name\n\n @property\n @deprecation.deprecated(None, \"Do not call `graph_parents`.\")\n def graph_parents(self):\n \"\"\"List of graph dependencies of this `LinearOperator`.\"\"\"\n return self._graph_parents\n\n @property\n def is_non_singular(self):\n return self._is_non_singular\n\n @property\n def is_self_adjoint(self):\n return self._is_self_adjoint\n\n @property\n def is_positive_definite(self):\n return self._is_positive_definite\n\n @property\n def is_square(self):\n \"\"\"Return `True/False` depending on if this operator is square.\"\"\"\n # Static checks done after __init__. Why? 
Because domain/range dimension\n # sometimes requires lots of work done in the derived class after init.\n auto_square_check = self.domain_dimension == self.range_dimension\n if self._is_square_set_or_implied_by_hints is False and auto_square_check:\n raise ValueError(\n \"User set is_square hint to False, but the operator was square.\")\n if self._is_square_set_or_implied_by_hints is None:\n return auto_square_check\n\n return self._is_square_set_or_implied_by_hints\n\n @abc.abstractmethod\n def _shape(self):\n # Write this in derived class to enable all static shape methods.\n raise NotImplementedError(\"_shape is not implemented.\")\n\n @property\n def shape(self):\n \"\"\"`TensorShape` of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.shape`.\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n return self._shape()\n\n def _shape_tensor(self):\n # This is not an abstractmethod, since we want derived classes to be able to\n # override this with optional kwargs, which can reduce the number of\n # `convert_to_tensor` calls. See derived classes for examples.\n raise NotImplementedError(\"_shape_tensor is not implemented.\")\n\n def shape_tensor(self, name=\"shape_tensor\"):\n \"\"\"Shape of this `LinearOperator`, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n with self._name_scope(name):\n # Prefer to use statically defined shape if available.\n if self.shape.is_fully_defined():\n return linear_operator_util.shape_tensor(self.shape.as_list())\n else:\n return self._shape_tensor()\n\n @property\n def batch_shape(self):\n \"\"\"`TensorShape` of batch dimensions of this `LinearOperator`.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns\n `TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]`\n\n Returns:\n `TensorShape`, statically determined, may be undefined.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n return self.shape[:-2]\n\n def batch_shape_tensor(self, name=\"batch_shape_tensor\"):\n \"\"\"Shape of batch dimensions of this operator, determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding\n `[B1,...,Bb]`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name):\n return self._batch_shape_tensor()\n\n def _batch_shape_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. 
without excessive Tensor conversions.\n if self.batch_shape.is_fully_defined():\n return linear_operator_util.shape_tensor(\n self.batch_shape.as_list(), name=\"batch_shape\")\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[:-2]\n\n @property\n def tensor_rank(self, name=\"tensor_rank\"):\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Python integer, or None if the tensor rank is undefined.\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name):\n return self.shape.ndims\n\n def tensor_rank_tensor(self, name=\"tensor_rank_tensor\"):\n \"\"\"Rank (in the sense of tensors) of matrix corresponding to this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`, determined at runtime.\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name):\n return self._tensor_rank_tensor()\n\n def _tensor_rank_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. without excessive Tensor conversions.\n if self.tensor_rank is not None:\n return ops.convert_to_tensor_v2_with_dispatch(self.tensor_rank)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return array_ops.size(shape)\n\n @property\n def domain_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Returns:\n `Dimension` object.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n if self.shape.rank is None:\n return tensor_shape.Dimension(None)\n else:\n return self.shape.dims[-1]\n\n def domain_dimension_tensor(self, name=\"domain_dimension_tensor\"):\n \"\"\"Dimension (in the sense of vector spaces) of the domain of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name):\n return self._domain_dimension_tensor()\n\n def _domain_dimension_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. 
without excessive Tensor conversions.\n dim_value = tensor_shape.dimension_value(self.domain_dimension)\n if dim_value is not None:\n return ops.convert_to_tensor_v2_with_dispatch(dim_value)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[-1]\n\n @property\n def range_dimension(self):\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Returns:\n `Dimension` object.\n \"\"\"\n # Derived classes get this \"for free\" once .shape is implemented.\n if self.shape.dims:\n return self.shape.dims[-2]\n else:\n return tensor_shape.Dimension(None)\n\n def range_dimension_tensor(self, name=\"range_dimension_tensor\"):\n \"\"\"Dimension (in the sense of vector spaces) of the range of this operator.\n\n Determined at runtime.\n\n If this operator acts like the batch matrix `A` with\n `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `int32` `Tensor`\n \"\"\"\n # Derived classes get this \"for free\" once .shape() is implemented.\n with self._name_scope(name):\n return self._range_dimension_tensor()\n\n def _range_dimension_tensor(self, shape=None):\n # `shape` may be passed in if this can be pre-computed in a\n # more efficient manner, e.g. without excessive Tensor conversions.\n dim_value = tensor_shape.dimension_value(self.range_dimension)\n if dim_value is not None:\n return ops.convert_to_tensor_v2_with_dispatch(dim_value)\n else:\n shape = self.shape_tensor() if shape is None else shape\n return shape[-2]\n\n def _assert_non_singular(self):\n \"\"\"Private default implementation of _assert_non_singular.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of assert_non_singular.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n return self.assert_positive_definite()\n else:\n singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)\n # TODO(langmore) Add .eig and .cond as methods.\n cond = (math_ops.reduce_max(singular_values, axis=-1) /\n math_ops.reduce_min(singular_values, axis=-1))\n return check_ops.assert_less(\n cond,\n self._max_condition_number_to_be_non_singular(),\n message=\"Singular matrix up to precision epsilon.\")\n\n def _max_condition_number_to_be_non_singular(self):\n \"\"\"Return the maximum condition number that we consider nonsingular.\"\"\"\n with ops.name_scope(\"max_nonsingular_condition_number\"):\n dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps\n eps = math_ops.cast(\n math_ops.reduce_max([\n 100.,\n math_ops.cast(self.range_dimension_tensor(), self.dtype),\n math_ops.cast(self.domain_dimension_tensor(), self.dtype)\n ]), self.dtype) * dtype_eps\n return 1. 
/ eps\n\n def assert_non_singular(self, name=\"assert_non_singular\"):\n \"\"\"Returns an `Op` that asserts this operator is non singular.\n\n This operator is considered non-singular if\n\n ```\n ConditionNumber < max{100, range_dimension, domain_dimension} * eps,\n eps := np.finfo(self.dtype.as_numpy_dtype).eps\n ```\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is singular.\n \"\"\"\n with self._name_scope(name):\n return self._assert_non_singular()\n\n def _assert_positive_definite(self):\n \"\"\"Default implementation of _assert_positive_definite.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of \"\n \"assert_positive_definite.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n # If the operator is self-adjoint, then checking that\n # Cholesky decomposition succeeds + results in positive diag is necessary\n # and sufficient.\n if self.is_self_adjoint:\n return check_ops.assert_positive(\n array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),\n message=\"Matrix was not positive definite.\")\n # We have no generic check for positive definite.\n raise NotImplementedError(\"assert_positive_definite is not implemented.\")\n\n def assert_positive_definite(self, name=\"assert_positive_definite\"):\n \"\"\"Returns an `Op` that asserts this operator is positive definite.\n\n Here, positive definite means that the quadratic form `x^H A x` has positive\n real part for all nonzero `x`. Note that we do not require the operator to\n be self-adjoint to be positive definite.\n\n Args:\n name: A name to give this `Op`.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not positive definite.\n \"\"\"\n with self._name_scope(name):\n return self._assert_positive_definite()\n\n def _assert_self_adjoint(self):\n dense = self.to_dense()\n logging.warn(\n \"Using (possibly slow) default implementation of assert_self_adjoint.\"\n \" Requires conversion to a dense matrix.\")\n return check_ops.assert_equal(\n dense,\n linalg.adjoint(dense),\n message=\"Matrix was not equal to its adjoint.\")\n\n def assert_self_adjoint(self, name=\"assert_self_adjoint\"):\n \"\"\"Returns an `Op` that asserts this operator is self-adjoint.\n\n Here we check that this operator is *exactly* equal to its hermitian\n transpose.\n\n Args:\n name: A string name to prepend to created ops.\n\n Returns:\n An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if\n the operator is not self-adjoint.\n \"\"\"\n with self._name_scope(name):\n return self._assert_self_adjoint()\n\n def _check_input_dtype(self, arg):\n \"\"\"Check that arg.dtype == self.dtype.\"\"\"\n if arg.dtype.base_dtype != self.dtype:\n raise TypeError(\n \"Expected argument to have dtype %s. Found: %s in tensor %s\" %\n (self.dtype, arg.dtype, arg))\n\n @abc.abstractmethod\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n raise NotImplementedError(\"_matmul is not implemented.\")\n\n def matmul(self, x, adjoint=False, adjoint_arg=False, name=\"matmul\"):\n \"\"\"Transform [batch] matrix `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n X = ... 
# shape [..., N, R], batch matrix, R > 0.\n\n Y = operator.matmul(X)\n Y.shape\n ==> [..., M, R]\n\n Y[..., :, r] = sum_j A[..., :, j] X[j, r]\n ```\n\n Args:\n x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as\n `self`. See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.\n adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is\n the hermitian transpose (transposition and complex conjugation).\n name: A name for this `Op`.\n\n Returns:\n A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`\n as `self`.\n \"\"\"\n if isinstance(x, LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = x.adjoint() if adjoint_arg else x\n\n if (right_operator.range_dimension is not None and\n left_operator.domain_dimension is not None and\n right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError(\n \"Operators are incompatible. Expected `x` to have dimension\"\n \" {} but got {}.\".format(\n left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name):\n return linear_operator_algebra.matmul(left_operator, right_operator)\n\n with self._name_scope(name):\n x = ops.convert_to_tensor_v2_with_dispatch(x, name=\"x\")\n self._check_input_dtype(x)\n\n self_dim = -2 if adjoint else -1\n arg_dim = -1 if adjoint_arg else -2\n tensor_shape.dimension_at_index(\n self.shape, self_dim).assert_is_compatible_with(\n x.shape[arg_dim])\n\n return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def __matmul__(self, other):\n return self.matmul(other)\n\n def _matvec(self, x, adjoint=False):\n x_mat = array_ops.expand_dims(x, axis=-1)\n y_mat = self.matmul(x_mat, adjoint=adjoint)\n return array_ops.squeeze(y_mat, axis=-1)\n\n def matvec(self, x, adjoint=False, name=\"matvec\"):\n \"\"\"Transform [batch] vector `x` with left multiplication: `x --> Ax`.\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n\n X = ... # shape [..., N], batch vector\n\n Y = operator.matvec(X)\n Y.shape\n ==> [..., M]\n\n Y[..., :] = sum_j A[..., :, j] X[..., j]\n ```\n\n Args:\n x: `Tensor` with compatible shape and same `dtype` as `self`.\n `x` is treated as a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. 
If `True`, left multiply by the adjoint: `A^H x`.\n name: A name for this `Op`.\n\n Returns:\n A `Tensor` with shape `[..., M]` and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n x = ops.convert_to_tensor_v2_with_dispatch(x, name=\"x\")\n self._check_input_dtype(x)\n self_dim = -2 if adjoint else -1\n tensor_shape.dimension_at_index(\n self.shape, self_dim).assert_is_compatible_with(x.shape[-1])\n return self._matvec(x, adjoint=adjoint)\n\n def _determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n return math_ops.exp(self.log_abs_determinant())\n return linalg_ops.matrix_determinant(self.to_dense())\n\n def determinant(self, name=\"det\"):\n \"\"\"Determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False:\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n with self._name_scope(name):\n return self._determinant()\n\n def _log_abs_determinant(self):\n logging.warn(\n \"Using (possibly slow) default implementation of determinant.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n if self._can_use_cholesky():\n diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))\n return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])\n _, log_abs_det = linalg.slogdet(self.to_dense())\n return log_abs_det\n\n def log_abs_determinant(self, name=\"log_abs_det\"):\n \"\"\"Log absolute value of determinant for every batch member.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\n Raises:\n NotImplementedError: If `self.is_square` is `False`.\n \"\"\"\n if self.is_square is False:\n raise NotImplementedError(\n \"Determinant not implemented for an operator that is expected to \"\n \"not be square.\")\n with self._name_scope(name):\n return self._log_abs_determinant()\n\n def _dense_solve(self, rhs, adjoint=False, adjoint_arg=False):\n \"\"\"Solve by conversion to a dense matrix.\"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise NotImplementedError(\n \"Solve is not yet implemented for non-square operators.\")\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n if self._can_use_cholesky():\n return linalg_ops.cholesky_solve(\n linalg_ops.cholesky(self.to_dense()), rhs)\n return linear_operator_util.matrix_solve_with_broadcast(\n self.to_dense(), rhs, adjoint=adjoint)\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n \"\"\"Default implementation of _solve.\"\"\"\n logging.warn(\n \"Using (possibly slow) default implementation of solve.\"\n \" Requires conversion to a dense matrix and O(N^3) operations.\")\n return self._dense_solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def solve(self, rhs, adjoint=False, adjoint_arg=False, name=\"solve\"):\n \"\"\"Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. 
Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n # Solve R > 0 linear systems for every member of the batch.\n RHS = ... # shape [..., M, R]\n\n X = operator.solve(RHS)\n # X[..., :, r] is the solution to the r'th linear system\n # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]\n\n operator.matmul(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator and compatible shape.\n `rhs` is treated like a [batch] matrix meaning for every set of leading\n dimensions, the last two dimensions defines a matrix.\n See class docstring for definition of compatibility.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`\n is the hermitian transpose (transposition and complex conjugation).\n name: A name scope to use for ops added by this method.\n\n Returns:\n `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.\n\n Raises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n \"\"\"\n if self.is_non_singular is False:\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"be singular.\")\n if self.is_square is False:\n raise NotImplementedError(\n \"Exact solve not implemented for an operator that is expected to \"\n \"not be square.\")\n if isinstance(rhs, LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = rhs.adjoint() if adjoint_arg else rhs\n\n if (right_operator.range_dimension is not None and\n left_operator.domain_dimension is not None and\n right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError(\n \"Operators are incompatible. Expected `rhs` to have dimension\"\n \" {} but got {}.\".format(\n left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name):\n return linear_operator_algebra.solve(left_operator, right_operator)\n\n with self._name_scope(name):\n rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name=\"rhs\")\n self._check_input_dtype(rhs)\n\n self_dim = -1 if adjoint else -2\n arg_dim = -1 if adjoint_arg else -2\n tensor_shape.dimension_at_index(\n self.shape, self_dim).assert_is_compatible_with(\n rhs.shape[arg_dim])\n\n return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n\n def _solvevec(self, rhs, adjoint=False):\n \"\"\"Default implementation of _solvevec.\"\"\"\n rhs_mat = array_ops.expand_dims(rhs, axis=-1)\n solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n return array_ops.squeeze(solution_mat, axis=-1)\n\n def solvevec(self, rhs, adjoint=False, name=\"solve\"):\n \"\"\"Solve single equation with best effort: `A X = rhs`.\n\n The returned `Tensor` will be close to an exact solution if `A` is well\n conditioned. Otherwise closeness will vary. See class docstring for details.\n\n Examples:\n\n ```python\n # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\n operator = LinearOperator(...)\n operator.shape = [..., M, N]\n\n # Solve one linear system for every member of the batch.\n RHS = ... 
# shape [..., M]\n\n X = operator.solvevec(RHS)\n # X is the solution to the linear system\n # sum_j A[..., :, j] X[..., j] = RHS[..., :]\n\n operator.matvec(X)\n ==> RHS\n ```\n\n Args:\n rhs: `Tensor` with same `dtype` as this operator.\n `rhs` is treated like a [batch] vector meaning for every set of leading\n dimensions, the last dimension defines a vector. See class docstring\n for definition of compatibility regarding batch dimensions.\n adjoint: Python `bool`. If `True`, solve the system involving the adjoint\n of this `LinearOperator`: `A^H X = rhs`.\n name: A name scope to use for ops added by this method.\n\n Returns:\n `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.\n\n Raises:\n NotImplementedError: If `self.is_non_singular` or `is_square` is False.\n \"\"\"\n with self._name_scope(name):\n rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name=\"rhs\")\n self._check_input_dtype(rhs)\n self_dim = -1 if adjoint else -2\n tensor_shape.dimension_at_index(\n self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1])\n\n return self._solvevec(rhs, adjoint=adjoint)\n\n def adjoint(self, name=\"adjoint\"):\n \"\"\"Returns the adjoint of the current `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, return `A*`.\n Note that calling `self.adjoint()` and `self.H` are equivalent.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `LinearOperator` which represents the adjoint of this `LinearOperator`.\n \"\"\"\n if self.is_self_adjoint is True: # pylint: disable=g-bool-id-comparison\n return self\n with self._name_scope(name):\n return linear_operator_algebra.adjoint(self)\n\n # self.H is equivalent to self.adjoint().\n H = property(adjoint, None)\n\n def inverse(self, name=\"inverse\"):\n \"\"\"Returns the Inverse of this `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, return a `LinearOperator`\n representing `A^-1`.\n\n Args:\n name: A name scope to use for ops added by this method.\n\n Returns:\n `LinearOperator` representing inverse of this matrix.\n\n Raises:\n ValueError: When the `LinearOperator` is not hinted to be `non_singular`.\n \"\"\"\n if self.is_square is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"Cannot take the Inverse: This operator represents \"\n \"a non square matrix.\")\n if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"Cannot take the Inverse: This operator represents \"\n \"a singular matrix.\")\n\n with self._name_scope(name):\n return linear_operator_algebra.inverse(self)\n\n def cholesky(self, name=\"cholesky\"):\n \"\"\"Returns a Cholesky factor as a `LinearOperator`.\n\n Given `A` representing this `LinearOperator`, if `A` is positive definite\n self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky\n decomposition.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n `LinearOperator` which represents the lower triangular matrix\n in the Cholesky decomposition.\n\n Raises:\n ValueError: When the `LinearOperator` is not hinted to be positive\n definite and self adjoint.\n \"\"\"\n\n if not self._can_use_cholesky():\n raise ValueError(\"Cannot take the Cholesky decomposition: \"\n \"Not a positive definite self adjoint matrix.\")\n with self._name_scope(name):\n return linear_operator_algebra.cholesky(self)\n\n def _to_dense(self):\n \"\"\"Generic and often inefficient implementation. 
Override often.\"\"\"\n if self.batch_shape.is_fully_defined():\n batch_shape = self.batch_shape\n else:\n batch_shape = self.batch_shape_tensor()\n\n dim_value = tensor_shape.dimension_value(self.domain_dimension)\n if dim_value is not None:\n n = dim_value\n else:\n n = self.domain_dimension_tensor()\n\n eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)\n return self.matmul(eye)\n\n def to_dense(self, name=\"to_dense\"):\n \"\"\"Return a dense (batch) matrix representing this operator.\"\"\"\n with self._name_scope(name):\n return self._to_dense()\n\n def _diag_part(self):\n \"\"\"Generic and often inefficient implementation. Override often.\"\"\"\n return array_ops.matrix_diag_part(self.to_dense())\n\n def diag_part(self, name=\"diag_part\"):\n \"\"\"Efficiently get the [batch] diagonal part of this operator.\n\n If this operator has shape `[B1,...,Bb, M, N]`, this returns a\n `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where\n `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.\n\n ```\n my_operator = LinearOperatorDiag([1., 2.])\n\n # Efficiently get the diagonal\n my_operator.diag_part()\n ==> [1., 2.]\n\n # Equivalent, but inefficient method\n tf.linalg.diag_part(my_operator.to_dense())\n ==> [1., 2.]\n ```\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n diag_part: A `Tensor` of same `dtype` as self.\n \"\"\"\n with self._name_scope(name):\n return self._diag_part()\n\n def _trace(self):\n return math_ops.reduce_sum(self.diag_part(), axis=-1)\n\n def trace(self, name=\"trace\"):\n \"\"\"Trace of the linear operator, equal to sum of `self.diag_part()`.\n\n If the operator is square, this is also the sum of the eigenvalues.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n return self._trace()\n\n def _add_to_tensor(self, x):\n # Override if a more efficient implementation is available.\n return self.to_dense() + x\n\n def add_to_tensor(self, x, name=\"add_to_tensor\"):\n \"\"\"Add matrix represented by this operator to `x`. 
Equivalent to `A + x`.\n\n Args:\n x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.\n name: A name to give this `Op`.\n\n Returns:\n A `Tensor` with broadcast shape and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n x = ops.convert_to_tensor_v2_with_dispatch(x, name=\"x\")\n self._check_input_dtype(x)\n return self._add_to_tensor(x)\n\n def _eigvals(self):\n return linalg_ops.self_adjoint_eigvals(self.to_dense())\n\n def eigvals(self, name=\"eigvals\"):\n \"\"\"Returns the eigenvalues of this linear operator.\n\n If the operator is marked as self-adjoint (via `is_self_adjoint`)\n this computation can be more efficient.\n\n Note: This currently only supports self-adjoint operators.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n if not self.is_self_adjoint:\n raise NotImplementedError(\"Only self-adjoint matrices are supported.\")\n with self._name_scope(name):\n return self._eigvals()\n\n def _cond(self):\n if not self.is_self_adjoint:\n # In general the condition number is the ratio of the\n # absolute value of the largest and smallest singular values.\n vals = linalg_ops.svd(self.to_dense(), compute_uv=False)\n else:\n # For self-adjoint matrices, and in general normal matrices,\n # we can use eigenvalues.\n vals = math_ops.abs(self._eigvals())\n\n return (math_ops.reduce_max(vals, axis=-1) /\n math_ops.reduce_min(vals, axis=-1))\n\n def cond(self, name=\"cond\"):\n \"\"\"Returns the condition number of this linear operator.\n\n Args:\n name: A name for this `Op`.\n\n Returns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n return self._cond()\n\n def _can_use_cholesky(self):\n return self.is_self_adjoint and self.is_positive_definite\n\n def _set_graph_parents(self, graph_parents):\n \"\"\"Set self._graph_parents. Called during derived class init.\n\n This method allows derived classes to set graph_parents, without triggering\n a deprecation warning (which is invoked if `graph_parents` is passed during\n `__init__`.\n\n Args:\n graph_parents: Iterable over Tensors.\n \"\"\"\n # TODO(b/143910018) Remove this function in V3.\n graph_parents = [] if graph_parents is None else graph_parents\n for i, t in enumerate(graph_parents):\n if t is None or not (linear_operator_util.is_ref(t) or\n tensor_util.is_tensor(t)):\n raise ValueError(\"Graph parent item %d is not a Tensor; %s.\" % (i, t))\n self._graph_parents = graph_parents\n\n\n# Overrides for tf.linalg functions. 
This allows a LinearOperator to be used in\n# place of a Tensor.\n# For instance tf.trace(linop) and linop.trace() both work.\n\n\[email protected]_for_types(linalg.adjoint, LinearOperator)\ndef _adjoint(matrix, name=None):\n return matrix.adjoint(name)\n\n\[email protected]_for_types(linalg.cholesky, LinearOperator)\ndef _cholesky(input, name=None): # pylint:disable=redefined-builtin\n return input.cholesky(name)\n\n\n# The signature has to match with the one in python/op/array_ops.py,\n# so we have k, padding_value, and align even though we don't use them here.\n# pylint:disable=unused-argument\[email protected]_for_types(linalg.diag_part, LinearOperator)\ndef _diag_part(\n input, # pylint:disable=redefined-builtin\n name=\"diag_part\",\n k=0,\n padding_value=0,\n align=\"RIGHT_LEFT\"):\n return input.diag_part(name)\n# pylint:enable=unused-argument\n\n\[email protected]_for_types(linalg.det, LinearOperator)\ndef _det(input, name=None): # pylint:disable=redefined-builtin\n return input.determinant(name)\n\n\[email protected]_for_types(linalg.inv, LinearOperator)\ndef _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin\n inv = input.inverse(name)\n if adjoint:\n inv = inv.adjoint()\n return inv\n\n\[email protected]_for_types(linalg.logdet, LinearOperator)\ndef _logdet(matrix, name=None):\n if matrix.is_positive_definite and matrix.is_self_adjoint:\n return matrix.log_abs_determinant(name)\n raise ValueError(\"Expected matrix to be self-adjoint positive definite.\")\n\n\[email protected]_for_types(math_ops.matmul, LinearOperator)\ndef _matmul( # pylint:disable=missing-docstring\n a,\n b,\n transpose_a=False,\n transpose_b=False,\n adjoint_a=False,\n adjoint_b=False,\n a_is_sparse=False,\n b_is_sparse=False,\n name=None):\n if transpose_a or transpose_b:\n raise ValueError(\"Transposing not supported at this time.\")\n if a_is_sparse or b_is_sparse:\n raise ValueError(\"Sparse methods not supported at this time.\")\n if not isinstance(a, LinearOperator):\n # We use the identity (B^HA^H)^H = AB\n adjoint_matmul = b.matmul(\n a,\n adjoint=(not adjoint_b),\n adjoint_arg=(not adjoint_a),\n name=name)\n return linalg.adjoint(adjoint_matmul)\n return a.matmul(\n b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)\n\n\[email protected]_for_types(linalg.solve, LinearOperator)\ndef _solve(\n matrix,\n rhs,\n adjoint=False,\n name=None):\n if not isinstance(matrix, LinearOperator):\n raise ValueError(\"Passing in `matrix` as a Tensor and `rhs` as a \"\n \"LinearOperator is not supported.\")\n return matrix.solve(rhs, adjoint=adjoint, name=name)\n\n\[email protected]_for_types(linalg.trace, LinearOperator)\ndef _trace(x, name=None):\n return x.trace(name)\n", "# Copyright 2018 The TensorFlow Authors. 
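# --- Illustrative sketch (not part of the original sources above) ---
# linear_operator.py above defines the abstract base class plus dispatch
# overrides so that tf.linalg functions accept LinearOperator instances. The
# concrete LinearOperatorDiag subclass (mentioned in the diag_part docstring)
# makes the API concrete; the diagonal values below are assumptions chosen
# only for illustration.
import tensorflow as tf

operator = tf.linalg.LinearOperatorDiag(
    [1., 2.], is_self_adjoint=True, is_positive_definite=True)

x = tf.constant([[1.], [1.]])      # shape [2, 1], compatible with shape [2, 2]
print(operator.matmul(x))          # [[1.], [2.]]
print(operator.solve(x))           # [[1.], [0.5]]
print(operator.to_dense())         # [[1., 0.], [0., 2.]]

# The registered dispatchers route the functional API to the operator's own
# methods, e.g. tf.linalg.trace(operator) calls operator.trace().
print(tf.linalg.trace(operator))   # 3.0
print(tf.linalg.det(operator))     # 2.0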
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Utilities related to loss functions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine import keras_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.losses import loss_reduction\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n# TODO(joshl/psv): Update references to ReductionV2 to point to its\n# new location.\nReductionV2 = keras_export( # pylint: disable=invalid-name\n 'keras.losses.Reduction', v1=[])(loss_reduction.ReductionV2)\n\n\ndef remove_squeezable_dimensions(\n labels, predictions, expected_rank_diff=0, name=None):\n \"\"\"Squeeze last dim if ranks differ from expected by exactly 1.\n\n In the common case where we expect shapes to match, `expected_rank_diff`\n defaults to 0, and we squeeze the last dimension of the larger rank if they\n differ by 1.\n\n But, for example, if `labels` contains class IDs and `predictions` contains 1\n probability per class, we expect `predictions` to have 1 more dimension than\n `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze\n `labels` if `rank(predictions) - rank(labels) == 0`, and\n `predictions` if `rank(predictions) - rank(labels) == 2`.\n\n This will use static shape if available. 
Otherwise, it will add graph\n operations, which could result in a performance hit.\n\n Args:\n labels: Label values, a `Tensor` whose dimensions match `predictions`.\n predictions: Predicted values, a `Tensor` of arbitrary dimensions.\n expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.\n name: Name of the op.\n\n Returns:\n Tuple of `labels` and `predictions`, possibly with last dim squeezed.\n \"\"\"\n with K.name_scope(name or 'remove_squeezable_dimensions'):\n predictions = ops.convert_to_tensor_v2_with_dispatch(predictions)\n labels = ops.convert_to_tensor_v2_with_dispatch(labels)\n predictions_shape = predictions.get_shape()\n predictions_rank = predictions_shape.ndims\n labels_shape = labels.get_shape()\n labels_rank = labels_shape.ndims\n if (labels_rank is not None) and (predictions_rank is not None):\n # Use static rank.\n rank_diff = predictions_rank - labels_rank\n if (rank_diff == expected_rank_diff + 1 and\n predictions_shape.dims[-1].is_compatible_with(1)):\n predictions = array_ops.squeeze(predictions, [-1])\n elif (rank_diff == expected_rank_diff - 1 and\n labels_shape.dims[-1].is_compatible_with(1)):\n labels = array_ops.squeeze(labels, [-1])\n return labels, predictions\n\n # Use dynamic rank.\n rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)\n if (predictions_rank is None) or (\n predictions_shape.dims[-1].is_compatible_with(1)):\n predictions = control_flow_ops.cond(\n math_ops.equal(expected_rank_diff + 1, rank_diff),\n lambda: array_ops.squeeze(predictions, [-1]),\n lambda: predictions)\n if (labels_rank is None) or (\n labels_shape.dims[-1].is_compatible_with(1)):\n labels = control_flow_ops.cond(\n math_ops.equal(expected_rank_diff - 1, rank_diff),\n lambda: array_ops.squeeze(labels, [-1]),\n lambda: labels)\n return labels, predictions\n\n\ndef squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):\n \"\"\"Squeeze or expand last dimension if needed.\n\n 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1\n (using `remove_squeezable_dimensions`).\n 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1\n from the new rank of `y_pred`.\n If `sample_weight` is scalar, it is kept scalar.\n\n This will use static shape if available. Otherwise, it will add graph\n operations, which could result in a performance hit.\n\n Args:\n y_pred: Predicted values, a `Tensor` of arbitrary dimensions.\n y_true: Optional label `Tensor` whose dimensions match `y_pred`.\n sample_weight: Optional weight scalar or `Tensor` whose dimensions match\n `y_pred`.\n\n Returns:\n Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has\n the last dimension squeezed,\n `sample_weight` could be extended by one dimension.\n If `sample_weight` is None, (y_pred, y_true) is returned.\n \"\"\"\n y_pred_shape = y_pred.shape\n y_pred_rank = y_pred_shape.ndims\n if y_true is not None:\n\n # If sparse matrix is provided as `y_true`, the last dimension in `y_pred`\n # may be > 1. 
Eg: y_true = [0, 1, 2] (shape=(3,)),\n # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3))\n # In this case, we should not try to remove squeezable dimension.\n y_true_shape = y_true.shape\n y_true_rank = y_true_shape.ndims\n if (y_true_rank is not None) and (y_pred_rank is not None):\n # Use static rank for `y_true` and `y_pred`.\n if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:\n y_true, y_pred = remove_squeezable_dimensions(\n y_true, y_pred)\n else:\n # Use dynamic rank.\n rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)\n squeeze_dims = lambda: remove_squeezable_dimensions( # pylint: disable=g-long-lambda\n y_true, y_pred)\n is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])\n maybe_squeeze_dims = lambda: control_flow_ops.cond( # pylint: disable=g-long-lambda\n is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))\n y_true, y_pred = control_flow_ops.cond(\n math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)\n\n if sample_weight is None:\n return y_pred, y_true\n\n weights_shape = sample_weight.shape\n weights_rank = weights_shape.ndims\n if weights_rank == 0: # If weights is scalar, do nothing.\n return y_pred, y_true, sample_weight\n\n if (y_pred_rank is not None) and (weights_rank is not None):\n # Use static rank.\n if weights_rank - y_pred_rank == 1:\n sample_weight = array_ops.squeeze(sample_weight, [-1])\n elif y_pred_rank - weights_rank == 1:\n sample_weight = array_ops.expand_dims(sample_weight, [-1])\n return y_pred, y_true, sample_weight\n\n # Use dynamic rank.\n weights_rank_tensor = array_ops.rank(sample_weight)\n rank_diff = weights_rank_tensor - array_ops.rank(y_pred)\n maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])\n\n def _maybe_expand_weights():\n expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1])\n return control_flow_ops.cond(\n math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight)\n\n def _maybe_adjust_weights():\n return control_flow_ops.cond(\n math_ops.equal(rank_diff, 1), maybe_squeeze_weights,\n _maybe_expand_weights)\n\n # squeeze or expand last dim of `sample_weight` if its rank differs by 1\n # from the new rank of `y_pred`.\n sample_weight = control_flow_ops.cond(\n math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,\n _maybe_adjust_weights)\n return y_pred, y_true, sample_weight\n\n\ndef _safe_mean(losses, num_present):\n \"\"\"Computes a safe mean of the losses.\n\n Args:\n losses: `Tensor` whose elements contain individual loss measurements.\n num_present: The number of measurable elements in `losses`.\n\n Returns:\n A scalar representing the mean of `losses`. 
If `num_present` is zero,\n then zero is returned.\n \"\"\"\n total_loss = math_ops.reduce_sum(losses)\n return math_ops.div_no_nan(total_loss, num_present, name='value')\n\n\ndef _num_elements(losses):\n \"\"\"Computes the number of elements in `losses` tensor.\"\"\"\n with K.name_scope('num_elements') as scope:\n return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)\n\n\ndef reduce_weighted_loss(weighted_losses,\n reduction=ReductionV2.SUM_OVER_BATCH_SIZE):\n \"\"\"Reduces the individual weighted loss measurements.\"\"\"\n if reduction == ReductionV2.NONE:\n loss = weighted_losses\n else:\n loss = math_ops.reduce_sum(weighted_losses)\n if reduction == ReductionV2.SUM_OVER_BATCH_SIZE:\n loss = _safe_mean(loss, _num_elements(weighted_losses))\n return loss\n\n\ndef compute_weighted_loss(losses,\n sample_weight=None,\n reduction=ReductionV2.SUM_OVER_BATCH_SIZE,\n name=None):\n \"\"\"Computes the weighted loss.\n\n Args:\n losses: `Tensor` of shape `[batch_size, d1, ... dN]`.\n sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as\n `losses`, or be broadcastable to `losses`.\n reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.\n Default value is `SUM_OVER_BATCH_SIZE`.\n name: Optional name for the op.\n\n Raises:\n ValueError: If the shape of `sample_weight` is not compatible with `losses`.\n\n Returns:\n Weighted loss `Tensor` of the same type as `losses`. If `reduction` is\n `NONE`, this has the same shape as `losses`; otherwise, it is scalar.\n \"\"\"\n ReductionV2.validate(reduction)\n\n # If this function is called directly, then we just default 'AUTO' to\n # 'SUM_OVER_BATCH_SIZE'. Eg. Canned estimator use cases.\n if reduction == ReductionV2.AUTO:\n reduction = ReductionV2.SUM_OVER_BATCH_SIZE\n if sample_weight is None:\n sample_weight = 1.0\n with K.name_scope(name or 'weighted_loss'):\n # Save the `reduction` argument for loss normalization when distributing\n # to multiple replicas. Used only for estimator + v1 optimizer flow.\n ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access\n\n if not isinstance(losses, keras_tensor.KerasTensor):\n losses = ops.convert_to_tensor_v2_with_dispatch(losses)\n input_dtype = losses.dtype\n\n if not isinstance(sample_weight, keras_tensor.KerasTensor):\n sample_weight = ops.convert_to_tensor_v2_with_dispatch(sample_weight)\n\n # TODO(psv): Handle casting here in a better way, eg. if losses is float64\n # we do not want to lose precision.\n losses = math_ops.cast(losses, 'float32')\n sample_weight = math_ops.cast(sample_weight, 'float32')\n # Update dimensions of `sample_weight` to match with `losses` if possible.\n losses, _, sample_weight = squeeze_or_expand_dimensions( # pylint: disable=unbalanced-tuple-unpacking\n losses, None, sample_weight)\n weighted_losses = math_ops.multiply(losses, sample_weight)\n\n # Apply reduction function to the individual weighted losses.\n loss = reduce_weighted_loss(weighted_losses, reduction)\n # Convert the result back to the input type.\n loss = math_ops.cast(loss, input_dtype)\n return loss\n\n\ndef scale_loss_for_distribution(loss_value):\n \"\"\"Scales and returns the given loss value by the number of replicas.\"\"\"\n num_replicas = (\n distribution_strategy_context.get_strategy().num_replicas_in_sync)\n if num_replicas > 1:\n loss_value *= (1. 
/ num_replicas)\n return loss_value\n\n\ndef cast_losses_to_common_dtype(losses):\n \"\"\"Cast a list of losses to a common dtype.\n\n If any loss is floating-point, they will all be casted to the most-precise\n floating-point loss. Otherwise the losses are not casted. We also skip casting\n losses if there are any complex losses.\n\n Args:\n losses: A list of losses.\n\n Returns:\n `losses`, but they have been casted to a common dtype.\n \"\"\"\n highest_float = None\n for loss in losses:\n if loss.dtype.is_floating:\n if highest_float is None or loss.dtype.size > highest_float.size:\n highest_float = loss.dtype\n elif {loss.dtype, highest_float} == {'bfloat16', 'float16'}:\n highest_float = 'float32'\n if loss.dtype.is_complex:\n return losses # If we find any complex losses, do not cast any losses\n if highest_float:\n losses = [math_ops.cast(loss, highest_float) for loss in losses]\n return losses\n" ]
[ [ "tensorflow.python.ops.gradient_checker.compute_gradient", "tensorflow.python.framework.test_util.run_in_graph_and_eager_modes", "tensorflow.python.platform.tf_logging.debug", "tensorflow.python.ops.array_ops.split", "numpy.issubdtype", "numpy.fabs", "tensorflow.python.ops.nn_ops.conv2d_backprop_input", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.layers.convolutional.conv2d", "tensorflow.python.platform.test.is_built_with_rocm", "tensorflow.python.framework.test_util.NHWCToNCHW", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.nn_ops.conv2d_backprop_filter", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.framework.test_util.disable_xla", "tensorflow.python.ops.nn_ops.convolution", "numpy.pad", "numpy.arange", "tensorflow.python.ops.nn_impl.separable_conv2d", "tensorflow.python.platform.test.main", "tensorflow.python.framework.test_util.device", "numpy.ravel", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.ops.nn_ops.conv2d", "numpy.rint", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.nn_ops.Convolution", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.client.session.Session", "numpy.random.rand", "tensorflow.python.ops.nn_impl.depthwise_conv2d", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.Graph", "tensorflow.python.platform.tf_logging.info", "numpy.ones", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.array_ops.pad", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.python.framework.test_util.NCHWToNHWC", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.test_util.GpuSupportsHalfMatMulAndConv", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.distribute.values_util.is_saving_non_distributed", "tensorflow.python.ops.gen_resource_variable_ops.read_variable_op", "tensorflow.python.distribute.values.SyncOnReadVariable.assign_add", "tensorflow.python.distribute.values.SyncOnReadVariable.assign", "tensorflow.python.eager.tape.variable_accessed", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.has_default_graph", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.distribute.values.SyncOnReadVariable.assign_sub", "tensorflow.python.distribute.values.DistributedVarOp", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.eager.context.executing_eagerly" ], [ "tensorflow.nn.compute_average_loss", "numpy.expand_dims", "tensorflow.keras.losses.CategoricalCrossentropy", "numpy.mean", "tensorflow.keras.Input", "tensorflow.keras.layers.Conv2D", "tensorflow.test.main", "tensorflow.keras.layers.Flatten", "tensorflow.python.keras.benchmarks.distribution_util.get_distribution_strategy", "tensorflow.keras.layers.Dense", "tensorflow.function", "tensorflow.GradientTape", "tensorflow.python.keras.benchmarks.distribution_util.get_strategy_scope", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.utils.to_categorical" ], [ 
"tensorflow.python.keras.utils.conv_utils.convert_kernel", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.backend.get_value", "numpy.prod", "numpy.transpose", "tensorflow.python.util.nest.flatten" ], [ "tensorflow.python.distribute.distribute_utils.update_regroup", "tensorflow.python.distribute.cross_device_ops.reduce_non_distributed_value", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.distribute.distribute_lib.UpdateContext", "tensorflow.python.distribute.cross_device_ops.choose_the_best", "tensorflow.python.distribute.numpy_dataset.SingleDevice", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.distribute.distribute_lib.distribution_strategy_gauge.get_cell", "tensorflow.python.framework.device.DeviceSpec.from_string", "tensorflow.python.eager.tape.stop_recording", "tensorflow.python.framework.ops.device", "tensorflow.python.distribute.distribute_utils.validate_colocate_distributed_variable", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.distribute.distribute_utils.is_sync_on_read", "tensorflow.python.framework.ops.inside_function", "tensorflow.python.distribute.distribute_lib.ValueContext", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.distribute.distribute_utils.select_replica_mirrored", "tensorflow.python.distribute.distribute_utils.is_mirrored", "tensorflow.python.distribute.distribute_lib.InputContext", "tensorflow.python.distribute.input_lib.MultiStepContext", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.distribute.device_util.resolve", "tensorflow.python.distribute.distribute_utils.regroup", "tensorflow.python.distribute.cross_device_ops.NcclAllReduce", "tensorflow.python.eager.context.device_policy", "tensorflow.python.distribute.device_util.canonicalize", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.distribute.numpy_dataset.one_host_numpy_dataset", "tensorflow.python.eager.context.num_gpus", "tensorflow.python.framework.config.list_logical_devices", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.distribute.distribute_utils.value_container", "tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.distribute.input_lib.InputWorkers", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.random.random", "tensorflow.compat.v1.device", "numpy.random.seed", "numpy.random.choice", "tensorflow.compat.v1.global_variables", "numpy.random.random_sample", "numpy.dtype", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.lite.Interpreter", "tensorflow.lite.testing.generate_examples_report.make_report_table", "tensorflow.compat.v1.reset_default_graph", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.Graph", "numpy.isscalar", "numpy.array", "numpy.random.randint" ], [ "tensorflow.python.framework.ops.enable_eager_execution", "tensorflow.python.keras.distribute.saved_model_test_base.simple_models_with_strategies", "tensorflow.python.keras.distribute.saved_model_test_base.simple_models_with_strategy_pairs", 
"tensorflow.python.distribute.combinations.combine", "tensorflow.python.platform.test.main", "tensorflow.python.keras.testing_utils.run_all_without_tensor_float_32", "tensorflow.python.keras.saving.save.load_model" ], [ "tensorflow.python.keras.metrics.top_k_categorical_accuracy", "numpy.random.random", "tensorflow.python.keras.backend.eval", "numpy.arange", "tensorflow.python.keras.metrics.sparse_top_k_categorical_accuracy", "tensorflow.python.keras.backend.variable", "tensorflow.python.platform.test.main", "numpy.mean", "numpy.array", "tensorflow.python.keras.combinations.combine", "numpy.random.randint" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.linalg_ops.eye", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.linalg.linear_operator_util.is_ref", "tensorflow.python.util.tf_export.tf_export", "numpy.finfo", "tensorflow.python.ops.math_ops.reduce_min", "tensorflow.python.util.dispatch.dispatch_for_types", "tensorflow.python.ops.array_ops.size", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.linalg.linear_operator_algebra.matmul", "tensorflow.python.ops.linalg.linear_operator_algebra.adjoint", "tensorflow.python.ops.linalg.linear_operator_algebra.inverse", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.tensor_shape.dimension_at_index", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.linalg.linalg_impl.adjoint", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.ops.linalg.linear_operator_algebra.cholesky", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.linalg.linear_operator_algebra.solve", "tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch", "tensorflow.python.ops.array_ops.expand_dims" ], [ "tensorflow.python.ops.array_ops.rank", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.math_ops.div_no_nan", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.ops.array_ops.size", "tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.ops.math_ops.cast" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "2.7", "2.6", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.4", "2.9", "2.5", "2.6", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.4", "2.9", "2.5", "2.6", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.4", "2.9", "2.5", "2.6", "2.10" ] } ]
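The losses_utils code in the record above reduces weighted losses with SUM_OVER_BATCH_SIZE semantics: per-element losses are multiplied by the sample weights, summed, and divided by the total element count rather than by the weight sum, with a div-no-nan guard for empty tensors. A minimal NumPy sketch of that reduction, assuming nothing beyond the formulas visible above; the function name weighted_loss_sum_over_batch_size and the test values are illustrative, not part of the source.

import numpy as np

def weighted_loss_sum_over_batch_size(losses, sample_weight=None):
    # Mirrors the SUM_OVER_BATCH_SIZE path above: sum(losses * weights) / number of elements,
    # returning zero instead of dividing by zero when the tensor is empty (div_no_nan).
    losses = np.asarray(losses, dtype=np.float64)
    if sample_weight is None:
        sample_weight = 1.0
    weighted = losses * np.asarray(sample_weight, dtype=np.float64)
    total = weighted.sum()
    n = losses.size
    return total / n if n else 0.0

# A zero-weighted sample still counts toward the denominator, so masked samples dilute the mean.
print(weighted_loss_sum_over_batch_size([0.5, 1.5, 2.0], [1.0, 1.0, 0.0]))  # (0.5 + 1.5 + 0.0) / 3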
vikranth22446/PyHessian
[ "e8b1fbadb24349eef8f3a137ecfd27dfc6e3bb53" ]
[ "density_plot.py" ]
[ "#*\n# @file Different utility functions\n# Copyright (c) Zhewei Yao, Amir Gholami\n# All rights reserved.\n# This file is part of PyHessian library.\n#\n# PyHessian is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# PyHessian is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PyHessian. If not, see <http://www.gnu.org/licenses/>.\n#*\n\nimport math\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef get_esd_plot(eigenvalues, weights):\n density, grids = density_generate(eigenvalues, weights)\n plt.semilogy(grids, density + 1.0e-7)\n plt.ylabel('Density (Log Scale)', fontsize=14, labelpad=10)\n plt.xlabel('Eigenvlaue', fontsize=14, labelpad=10)\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.axis([np.min(eigenvalues) - 1, np.max(eigenvalues) + 1, None, None])\n plt.tight_layout()\n plt.savefig('example.pdf')\n\n\ndef density_generate(eigenvalues,\n weights,\n num_bins=10000,\n sigma_squared=1e-5,\n overhead=0.01):\n\n eigenvalues = np.array(eigenvalues)\n weights = np.array(weights)\n\n lambda_max = np.mean(np.max(eigenvalues, axis=1), axis=0) + overhead\n lambda_min = np.mean(np.min(eigenvalues, axis=1), axis=0) - overhead\n\n grids = np.linspace(lambda_min, lambda_max, num=num_bins)\n sigma = sigma_squared * max(1, (lambda_max - lambda_min))\n\n num_runs = eigenvalues.shape[0]\n density_output = np.zeros((num_runs, num_bins))\n\n for i in range(num_runs):\n for j in range(num_bins):\n x = grids[j]\n tmp_result = gaussian(eigenvalues[i, :], x, sigma)\n density_output[i, j] = np.sum(tmp_result * weights[i, :])\n density = np.mean(density_output, axis=0)\n normalization = np.sum(density) * (grids[1] - grids[0])\n density = density / normalization\n return density, grids\n\n\ndef gaussian(x, x0, sigma_squared):\n return np.exp(-(x0 - x)**2 /\n (2.0 * sigma_squared)) / np.sqrt(2 * np.pi * sigma_squared)\n" ]
[ [ "matplotlib.pyplot.semilogy", "matplotlib.pyplot.yticks", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "numpy.linspace", "numpy.min", "matplotlib.use", "matplotlib.pyplot.savefig", "numpy.max", "numpy.mean", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "numpy.zeros", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
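density_plot.py above builds an eigenvalue spectral density by placing a Gaussian bump at each (eigenvalue, weight) pair and normalizing the result to integrate to one over the grid. A self-contained single-run sketch of that kernel-broadening step; the name smoothed_density, the variance argument, and the synthetic Ritz values are illustrative and not taken from PyHessian.

import numpy as np

def smoothed_density(eigenvalues, weights, num_bins=1000, variance=1e-2, overhead=0.01):
    # One Gaussian bump per (eigenvalue, weight) pair, then normalize to unit integral,
    # as density_generate above does across runs.
    eigenvalues = np.asarray(eigenvalues, dtype=float)
    weights = np.asarray(weights, dtype=float)
    grids = np.linspace(eigenvalues.min() - overhead, eigenvalues.max() + overhead, num_bins)
    bumps = np.exp(-(grids[:, None] - eigenvalues[None, :]) ** 2 / (2.0 * variance))
    bumps /= np.sqrt(2.0 * np.pi * variance)
    density = (bumps * weights[None, :]).sum(axis=1)
    density /= density.sum() * (grids[1] - grids[0])
    return grids, density

# Three synthetic Ritz values with equal weights; the resulting density integrates to ~1.
grids, density = smoothed_density([-0.5, 0.1, 2.0], [1/3, 1/3, 1/3])
print(density.sum() * (grids[1] - grids[0]))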
nokia/integratedimputation
[ "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba", "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba", "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba" ]
[ "evals/gain/gain_fixm.py", "common/wgain_.py", "common/gain_.py" ]
[ "#!/usr/bin/env python3\n\n# © 2021 Nokia\n#\n# Licensed under the BSD 3 Clause license\n# SPDX-License-Identifier: BSD-3-Clause\n\n# http://proceedings.mlr.press/v80/yoon18a/yoon18a.pdf\n\nimport sys\n\nsys.path.append('../../common/')\nfrom defaults import *\nfrom gain_ import train\nfrom data_mobile import loadData, normData, foldData\nfrom eval_ import EvalACC\nimport utils\n\nsys.path.append('../../common/nets/')\nfrom net_ae import NetAEConvTrans\nfrom net_disc import NetDiscConvTrans\n\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\nimport argparse\n\n# ==============================================================================\n# Settings =====================================================================\nparser = argparse.ArgumentParser()\nparser.add_argument('--out_folder', default = './out_test')\nparser.add_argument('--missing_type', default = 'ran')\nparser.add_argument('--gpu_id', default = None, type = int)\nparser.add_argument('--missing_rate_train', default = 0.5, type = float)\nparser.add_argument('--fold', default = 0, type = int)\nargs = parser.parse_args()\n\nout_folder = args.out_folder\nmissing_type = args.missing_type\ngpu_id = args.gpu_id\nmissing_rate_train = args.missing_rate_train\nfold = args.fold\n\nlr_ae = 0.0001\nwd_ae = 1e-05\n\nlr_disc = 0.0001\nwd_disc = 1e-05\n\nalpha = 10\niter_disc = 5\n\n\n# ==============================================================================\n# Data =========================================================================\nutils.makeFolders(out_folder)\n\nvalues_np, labels_np = loadData()\nvalues_np = normData(values_np)\n\nvalues_np_train, values_np_test, labels_np_train, labels_np_test = foldData(values_np, labels_np, fold)\n\n\n# ==============================================================================\n# Data loaders =================================================================\ndataset_train = torch.utils.data.TensorDataset(\n torch.tensor(values_np_train, dtype = torch.float),\n torch.tensor(labels_np_train, dtype = torch.long)\n)\n\ndataloader_train = torch.utils.data.DataLoader(\n dataset_train,\n batch_size = batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = 3\n)\n\ndataset_test = torch.utils.data.TensorDataset(\n torch.tensor(values_np_test, dtype = torch.float),\n torch.tensor(labels_np_test, dtype = torch.long)\n)\n\ndataloader_test = torch.utils.data.DataLoader(\n dataset_test,\n batch_size = batch_size,\n shuffle = False,\n pin_memory = True,\n num_workers = 3\n)\n\n# ==============================================================================\n# Definitions ==================================================================\nif missing_type == 'seq':\n introduceMissingTrain = utils.IntroduceMissingSeq(missing_rate_train)\nelse:\n introduceMissingTrain = utils.IntroduceMissing(missing_rate_train)\n \n# ==============================================================================\n# Instantiation ================================================================\nnet_ae = NetAEConvTrans(values_np.shape[1] * 2, values_np.shape[1])\nnet_disc = NetDiscConvTrans(values_np.shape[1], values_np.shape[1])\neval_acc = EvalACC(values_np.shape[1] * 2, out_folder, fold, epochs_gain_fixm[0], eval_acc_every)\n\nnet_dict = {\n \"net_ae\": net_ae,\n \"net_disc\": net_disc\n}\n\n\n# ==============================================================================\n# Move to GPU ==================================================================\ndevice = torch.device(\"cuda:%d\" % 
utils.gpuAssign(gpu_id))\n\nnet_ae.to(device)\nnet_disc.to(device)\n\neval_acc.to(device)\n\n\n# ==============================================================================\n# Opts =========================================================================\nopt_ae = torch.optim.Adam(\n net_ae.parameters(),\n lr = lr_ae,\n weight_decay = wd_ae\n)\n\nopt_disc = torch.optim.Adam(\n net_disc.parameters(),\n lr = lr_disc,\n weight_decay = wd_disc\n)\n\nopt_dict = {\n \"opt_ae\": opt_ae,\n \"opt_disc\": opt_disc\n}\n\n\n# ==============================================================================\n# Calls ========================================================================\ntrain(\n alpha,\n iter_disc,\n introduceMissingTrain,\n net_dict,\n opt_dict,\n dataloader_train,\n dataloader_test,\n device,\n eval_every,\n out_folder,\n eval_acc,\n epochs_end = epochs_gain_fixm[1],\n epochs_start = epochs_gain_fixm[0]\n)\n", "# © 2021 Nokia\n#\n# Licensed under the BSD 3 Clause license\n# SPDX-License-Identifier: BSD-3-Clause\n\n# http://proceedings.mlr.press/v80/yoon18a/yoon18a.pdf\n\nfrom defaults import *\n\nimport utils\n\nimport numpy as np\n\nimport torch\nimport torch.autograd as autograd\n\n# ==============================================================================\n# Defs =========================================================================\ndef calc_gradient_penalty(net_disc, real_data, generated_data, lambda_gp):\n # Calculate interpolation\n b_size = real_data.size(0)\n shape = [b_size] + [1] * (real_data.dim() - 1)\n alpha = torch.rand(shape, dtype=torch.float32, device=real_data.device)\n\n interpolated = alpha * real_data.detach() + (1 - alpha) * generated_data.detach()\n interpolated.requires_grad_(True)\n\n # Calculate scores of interpolated examples\n score_interpolated = net_disc(interpolated)\n\n # Calculate gradients of scores with respect to examples\n gradients = autograd.grad(outputs=score_interpolated, inputs=interpolated,\n grad_outputs=torch.ones_like(score_interpolated),\n create_graph=True, retain_graph=True)[0]\n\n # Flatten to easily take norm per example in batch\n gradients = gradients.view(b_size, -1)\n\n # Derivatives of the gradient close to 0 can cause problems because of\n # the square root, so manually calculate norm and add epsilon\n gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n # Return gradient penalty\n return(lambda_gp * ((gradients_norm - 1) ** 2).mean())\n\ndef evaluate(\n net_ae,\n dataloader_test,\n device,\n epoch,\n eval_acc\n):\n \n for i_mr, missing_rate in enumerate(missing_rates_eval):\n for j_mt, missing_type in enumerate(missing_types_eval):\n \n if missing_type == 'seq':\n introduceMissingEval = utils.IntroduceMissingSeq(missing_rate)\n else:\n introduceMissingEval = utils.IntroduceMissing(missing_rate)\n \n with torch.no_grad():\n net_ae.eval()\n for i, data in enumerate(dataloader_test, 0):\n x, l = data\n x = x.to(device)\n \n # Introduce missingness\n x_mis, mask = introduceMissingEval(x)\n \n # Attach mask to input\n mask_in = (mask * 2) - 1 # Centering the mask\n x_mis_in = torch.cat([x_mis, mask_in], dim = 1)\n \n # Main forward\n out = net_ae(x_mis_in)\n \n eval_acc.accumulate(x, x_mis, mask, out, l, epoch+1, 2*i_mr + j_mt)\n \ndef train(\n alpha,\n iter_disc,\n lambda_gp,\n introduceMissingTrain,\n net_dict,\n opt_dict,\n dataloader_train,\n dataloader_test,\n device,\n eval_every,\n out_folder,\n eval_acc,\n epochs_end = 10, \n epochs_start = 0\n):\n \n if epochs_start != 0:\n utils.loadAll(\n 
net_dict,\n opt_dict,\n epochs_start,\n epoch_digits,\n out_folder\n )\n \n net_ae = net_dict[\"net_ae\"]\n opt_ae = opt_dict[\"opt_ae\"]\n \n net_disc = net_dict[\"net_disc\"]\n opt_disc = opt_dict[\"opt_disc\"]\n \n logger = utils.Logger([\"l_ae_mis\", \"l_ae_rec\", \"l_disc\"], epoch_digits, out_folder, epochs_start != 0)\n\n for epoch in range(epochs_start, epochs_end):\n net_ae.train()\n net_disc.train()\n for i, data in enumerate(dataloader_train, 0):\n x, _ = data\n x = x.to(device)\n \n # Introduce missingness, generate noisy input\n x_mis, mask = introduceMissingTrain(x)\n\n # Attach mask to input\n mask_in = (mask * 2) - 1 # Centering the mask\n x_mis_in = torch.cat([x_mis, mask_in], dim = 1)\n \n # Discriminator ----------------------------------------------------\n if introduceMissingTrain.missing_rate is not None and introduceMissingTrain.missing_rate != 0:\n with torch.no_grad():\n out = net_ae(x_mis_in)\n out_imp = x_mis + ((1 - mask) * out)\n \n # Random sample/shuffle inputs\n perm_both = torch.cat([x.unsqueeze(3), out_imp.unsqueeze(3)], dim = 3)\n perm_mask = (torch.rand_like(x) > 0.5).to(torch.long).unsqueeze(3)\n\n perm_real = torch.gather(perm_both, 3, perm_mask).squeeze(3)\n perm_fake = torch.gather(perm_both, 3, (1 - perm_mask)).squeeze(3)\n \n out_disc_real = net_disc(perm_real)\n out_disc_fake = net_disc(perm_fake)\n\n out_disc_both = torch.cat([out_disc_real.unsqueeze(3), out_disc_fake.unsqueeze(3)], dim = 3)\n out_disc_real = torch.gather(out_disc_both, 3, perm_mask).squeeze(3)\n out_disc_fake = torch.gather(out_disc_both, 3, (1 - perm_mask)).squeeze(3)\n \n # Losses\n l_disc_real = -torch.mean((1 - mask) * out_disc_real)\n l_disc_fake = torch.mean((1 - mask) * out_disc_fake)\n l_grad_pen = calc_gradient_penalty(net_disc, x, out_imp, lambda_gp)\n \n l_disc = l_disc_real + l_disc_fake + l_grad_pen\n \n opt_disc.zero_grad()\n l_disc.backward()\n opt_disc.step()\n \n logger.accumulate([l_disc.item()], [2])\n \n # AE ---------------------------------------------------------------\n if not (i % iter_disc):\n out = net_ae(x_mis_in)\n out_imp = x_mis + ((1 - mask) * out)\n \n out_disc_fake = net_disc(out_imp)\n\n l_ae_mis = -torch.mean((1 - mask) * out_disc_fake)\n\n l_ae_rec = torch.nn.MSELoss()(out, x)\n l_ae = (alpha * l_ae_rec) + l_ae_mis\n \n opt_ae.zero_grad()\n l_ae.backward()\n opt_ae.step()\n\n logger.accumulate([l_ae_mis.item(), l_ae_rec.item()])\n\n # Eval\n if not ((epoch + 1) % eval_every):\n evaluate(net_ae, dataloader_test, device, epoch, eval_acc)\n logger.log(epoch + 1)\n eval_acc.log(epoch + 1)\n\n # Save\n utils.saveAll(\n net_dict,\n opt_dict,\n epoch + 1,\n epoch_digits,\n out_folder\n )\n\n print('Finished Training')\n", "# © 2021 Nokia\n#\n# Licensed under the BSD 3 Clause license\n# SPDX-License-Identifier: BSD-3-Clause\n\n# http://proceedings.mlr.press/v80/yoon18a/yoon18a.pdf\n\nfrom defaults import *\n\nimport utils\n\nimport numpy as np\n\nimport torch\n\n\n# ==============================================================================\n# Defs =========================================================================\ndef evaluate(\n net_ae,\n net_disc,\n introduceMissingTrain,\n dataloader_test,\n device,\n epoch,\n eval_acc,\n logger\n):\n with torch.no_grad():\n net_ae.eval()\n for i, data in enumerate(dataloader_test, 0):\n x, _ = data\n x = x.to(device)\n\n # Introduce missingness\n x_mis, mask = introduceMissingTrain(x)\n\n # Attach mask to input\n mask_in = (mask * 2) - 1 # Centering the mask\n x_mis_in = torch.cat([x_mis, mask_in], dim 
= 1)\n\n # Main forward\n out = net_ae(x_mis_in)\n out_imp = x_mis + ((1 - mask) * out)\n out_disc = net_disc(out_imp)\n \n acc_disc = torch.mean((torch.round(out_disc) == mask).double())\n\n logger.accumulate([acc_disc.item()], [4])\n \n for i_mr, missing_rate in enumerate(missing_rates_eval):\n for j_mt, missing_type in enumerate(missing_types_eval):\n \n if missing_type == 'seq':\n introduceMissingEval = utils.IntroduceMissingSeq(missing_rate)\n else:\n introduceMissingEval = utils.IntroduceMissing(missing_rate)\n \n with torch.no_grad():\n net_ae.eval()\n for i, data in enumerate(dataloader_test, 0):\n x, l = data\n x = x.to(device)\n \n # Introduce missingness\n x_mis, mask = introduceMissingEval(x)\n \n # Attach mask to input\n mask_in = (mask * 2) - 1 # Centering the mask\n x_mis_in = torch.cat([x_mis, mask_in], dim = 1)\n \n # Main forward\n out = net_ae(x_mis_in)\n \n eval_acc.accumulate(x, x_mis, mask, out, l, epoch+1, 2*i_mr + j_mt)\n \ndef train(\n alpha,\n iter_disc,\n introduceMissingTrain,\n net_dict,\n opt_dict,\n dataloader_train,\n dataloader_test,\n device,\n eval_every,\n out_folder,\n eval_acc,\n epochs_end = 10, \n epochs_start = 0\n):\n \n if epochs_start != 0:\n utils.loadAll(\n net_dict,\n opt_dict,\n epochs_start,\n epoch_digits,\n out_folder\n )\n \n net_ae = net_dict[\"net_ae\"]\n opt_ae = opt_dict[\"opt_ae\"]\n \n net_disc = net_dict[\"net_disc\"]\n opt_disc = opt_dict[\"opt_disc\"]\n \n logger = utils.Logger([\"l_ae_mis\", \"l_ae_rec\", \"l_disc\", \"acc_disc\", \"acc_eval_disc\"], epoch_digits, out_folder, epochs_start != 0)\n\n for epoch in range(epochs_start, epochs_end):\n net_ae.train()\n net_disc.train()\n for i, data in enumerate(dataloader_train, 0):\n x, _ = data\n x = x.to(device)\n \n # Introduce missingness, generate noisy input\n x_mis, mask = introduceMissingTrain(x)\n\n # Attach mask to input\n mask_in = (mask * 2) - 1 # Centering the mask\n x_mis_in = torch.cat([x_mis, mask_in], dim = 1)\n \n # Discriminator ----------------------------------------------------\n if introduceMissingTrain.missing_rate is not None and introduceMissingTrain.missing_rate != 0:\n with torch.no_grad():\n out = net_ae(x_mis_in)\n out_imp = x_mis + ((1 - mask) * out)\n\n # Random sample/shuffle inputs\n perm_both = torch.cat([x.unsqueeze(3), out_imp.unsqueeze(3)], dim = 3)\n perm_mask = (torch.rand_like(x) > 0.5).to(torch.long).unsqueeze(3)\n\n perm_real = torch.gather(perm_both, 3, perm_mask).squeeze(3)\n perm_fake = torch.gather(perm_both, 3, (1 - perm_mask)).squeeze(3)\n \n out_disc_real = net_disc(perm_real)\n out_disc_fake = net_disc(perm_fake)\n\n out_disc_both = torch.cat([out_disc_real.unsqueeze(3), out_disc_fake.unsqueeze(3)], dim = 3)\n out_disc_real = torch.gather(out_disc_both, 3, perm_mask).squeeze(3)\n out_disc_fake = torch.gather(out_disc_both, 3, (1 - perm_mask)).squeeze(3)\n \n # Losses\n l_disc_real = (1 - mask) * torch.log(out_disc_real + eps)\n l_disc_fake = (1 - mask) * torch.log(1 - out_disc_fake + eps)\n \n l_disc = -torch.mean(l_disc_real + l_disc_fake)\n\n acc_disc = (\n torch.sum((1 - mask) * (1 - torch.round(out_disc_fake))) +\n torch.sum((1 - mask) * torch.round(out_disc_real))\n ) / (2 * torch.sum(1 - mask))\n \n opt_disc.zero_grad()\n l_disc.backward()\n opt_disc.step()\n \n logger.accumulate([l_disc.item(), acc_disc.item()], [2, 3])\n \n # AE ---------------------------------------------------------------\n if not (i % iter_disc):\n out = net_ae(x_mis_in)\n out_imp = x_mis + ((1 - mask) * out)\n \n out_disc_fake = net_disc(out_imp)\n \n 
l_ae_mis_fake = (1 - mask) * torch.log(out_disc_fake + eps)\n l_ae_mis = -torch.mean(l_ae_mis_fake)\n \n l_ae_rec = torch.nn.MSELoss()(out, x)\n l_ae = (alpha * l_ae_rec) + l_ae_mis\n \n opt_ae.zero_grad()\n l_ae.backward()\n opt_ae.step()\n\n logger.accumulate([l_ae_mis.item(), l_ae_rec.item()])\n\n # Eval\n if not ((epoch + 1) % eval_every):\n evaluate(net_ae, net_disc, introduceMissingTrain, dataloader_test, device, epoch, eval_acc, logger)\n logger.log(epoch + 1)\n eval_acc.log(epoch + 1)\n\n # Save\n utils.saveAll(\n net_dict,\n opt_dict,\n epoch + 1,\n epoch_digits,\n out_folder\n )\n\n print('Finished Training')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.tensor" ], [ "torch.mean", "torch.nn.MSELoss", "torch.rand_like", "torch.cat", "torch.sum", "torch.no_grad", "torch.rand", "torch.gather", "torch.ones_like" ], [ "torch.mean", "torch.rand_like", "torch.cat", "torch.round", "torch.sum", "torch.no_grad", "torch.log", "torch.gather", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
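calc_gradient_penalty in wgain_.py above interpolates between real and imputed batches, scores the interpolation with the discriminator, and penalizes gradient norms that drift from one (the standard WGAN-GP term). A toy, self-contained run of that same penalty, assuming a one-layer linear scorer and random 8-dimensional samples purely for illustration; the shapes, lambda_gp value, and variable names are arbitrary test choices, not values from the repository.

import torch
import torch.autograd as autograd

def gradient_penalty(net_disc, real, fake, lambda_gp=10.0):
    # Interpolate real/imputed samples, score them, and penalize gradient norms away from 1,
    # following calc_gradient_penalty in wgain_.py above.
    b = real.size(0)
    alpha = torch.rand([b] + [1] * (real.dim() - 1), device=real.device)
    interp = (alpha * real.detach() + (1 - alpha) * fake.detach()).requires_grad_(True)
    score = net_disc(interp)
    grads = autograd.grad(outputs=score, inputs=interp,
                          grad_outputs=torch.ones_like(score),
                          create_graph=True, retain_graph=True)[0]
    grad_norm = torch.sqrt(torch.sum(grads.view(b, -1) ** 2, dim=1) + 1e-12)
    return lambda_gp * ((grad_norm - 1) ** 2).mean()

# Toy check with a linear scorer on flat 8-dimensional samples.
disc = torch.nn.Linear(8, 1)
real, fake = torch.randn(4, 8), torch.randn(4, 8)
print(gradient_penalty(disc, real, fake).item())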
aimimi2015/LDA_patent
[ "e5df0b8e1b741c19352485b5b2dca560e1a961f1" ]
[ "matplotlib/line.py" ]
[ "# coding: utf-8\nfrom __future__ import print_function\nfrom __future__ import print_function\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pymysql\nimport pickle\nfrom sympy import *\n\n#x1 = np.arange(1, 23, 1)\n# y = np.array([4.00, 6.40, 8.00, 8.80, 9.22, 9.50, 9.70, 9.86, 10.00, 10.20, 10.32, 11.42, 12.00, 12.42, 13.00, 15.00, 16.20, 17.32, 19.42, 21.00])\n#y1 = np.array([0.145, 0.046, 0.044, 0.040, 0.18, 0.047, 0.048 ,0.13, 0.035, 0.035, 0.032,0.145, 0.046, 0.044, 0.040, 0.18, 0.047, 0.048 ,0.13, 0.035, 0.035, 0.032])\n\n\npkl_file = open('../领域预测/topiclist/list22.pkl', 'rb')\n\nlist1 = pickle.load(pkl_file)\n\n\n#print(json.dumps(list1 , encoding='UTF-8', ensure_ascii=False))\n\n#print len(list1)\n\nnewlist=[]\nsumlist=0\ni=0\nh=0\nj=1 #这是间隔,如果 =1,就是一个月一个月的\nwhile i<len(list1):\n while h<j:\n sumlist = sumlist+list1[i+h]\n h=h+1\n newlist.append(sumlist)\n sumlist=0\n h=0\n i=i+j\n\nprint (len(newlist))\n\nx = np.arange(1, len(newlist)+1, 1)\n#y = np.array([4.00, 6.40, 8.00, 8.80, 9.22, 9.50, 9.70, 9.86, 10.00, 10.20, 10.32, 11.42, 12.00, 12.42, 13.00, 15.00, 16.20, 17.32, 19.42, 21.00])\ny = np.array(newlist)\n\n\nz1 = np.polyfit(x, y, 2) # 用3次多项式拟合\np1 = np.poly1d(z1)\n\nyvals = p1(x)\np2 = abs(yvals - y)\nsigma = np.std(p2)\nprint(sigma)\nprint(p2)\n\n'''\n具体来说,三西格玛规则是建立在数据服从正态分布的基础之上的,其阈值为\n正态分布平均值与三倍标准差之和。在正态分布中标准差为𝜎,均值为𝜇,对于全部\n的数据来说,数值分布在(𝜇 − 𝜎,𝜇 + 𝜎)中的概率为 0.655,布在(𝜇 − 2𝜎,𝜇 + 2𝜎)中的\n概率为 0.954,分布在(𝜇 − 3𝜎,𝜇 + 3𝜎)中的概率大致为 0.997。规则规定任何大于三\n西格玛阈值的值都极有可能是异常值。因此我们以图 4.3 中程序移除异常值,并进行\n临近数据点平均值替换。\n\n'''\nprint (\"p1:\"),\nprint(p1) # 在屏幕上打印拟合多项式\nyvals = p1(x) # 也可以使用yvals=np.polyval(z1,x)\nybar = np.sum(y) / len(y)\n\n#print(type(np.mean(p2)))\nout = p2>sigma*3\n#print(type(out))\nprint (out)\n\nssreg = np.sum((yvals - ybar) ** 2) #拟合数据方差\nsstot = np.sum((y - ybar) ** 2) #原始数据方差\nprint (ssreg / sstot) # 准确率\n\nplot1 = plt.plot(x, y, '*', label='original values')\nplot2 = plt.plot(x, yvals, 'r', label='polyfit values')\nplt.xlabel('year(05-15)')\nplt.ylabel('Proportion')\nplt.legend(loc=4) # 指定legend的位置,读者可以自己help它的用法\nplt.title('topic1')\nplt.show()\nplt.savefig('p1.png')\n\n\ny_new = y.tolist() #准备修改 这就是之后被替换的新的y分布\nyvals1 = yvals.tolist() #准备修改\n\n#\n# def quzao(sigma,y_new,yvals1):\n# i=0\n# while i < len(y_new):\n# if abs(y_new[i] - yvals1[i]) >= sigma * 3:\n# print(y_new[i])\n# if i != 0 and i != len(y) - 1:\n# y_new[i] = (y_new[i - 1] + y_new[i - 2]) * 0.5\n#\n# elif i == len(y) - 1:\n# y_new[i] = (y_new[len(y) - 2] + y_new[len(y) - 3]) * 0.5\n#\n# z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n# p1 = np.poly1d(z1)\n#\n# i = i + 1\n\n\nwhile True:\n i = 0\n while i < len(y):\n if abs(y_new[i]-yvals1[i])>=sigma*3:\n print (y_new[i])\n if i!=0 and i!=len(y)-1:\n y_new[i] = (y_new[i - 1] + y_new[i-2]) * 0.5\n elif i==1:\n y_new[i] = (y_new[0] + y_new[2]) * 0.5\n #z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n #p1 = np.poly1d(z1)\n\n\n # yvals_new = p1(x1)\n # plot_new1 = plt.plot(x1, y_new, '*', label='original values')\n # plot_new12 = plt.plot(x1, yvals_new, 'r', label='polyfit values')\n # plt.xlabel('x axis')\n # plt.ylabel('y axis')\n # plt.legend(loc=4) # 指定legend的位置\n # plt.title('polyfitting')\n # plt.show()\n # print('========')\n\n i=i+1\n z1 = np.polyfit(x, y_new, 2) # 用3次多项式拟合\n p1 = np.poly1d(z1)\n\n yvals = p1(x)\n p2 = abs(yvals - y_new)\n sigma1 = np.std(p2)\n print(sigma1)\n if(sigma==sigma1):\n break\n else:\n 
sigma=sigma1\n\n\nprint(y_new)\n\nz_new = np.polyfit(x, y_new, 2) # 用3次多项式拟合\np_new = np.poly1d(z_new)\nyvals_new = p_new(x)\nybar_new = np.sum(y_new) / len(y)\nssreg = np.sum((yvals_new - ybar_new) ** 2)\nsstot = np.sum((y_new - ybar_new) ** 2)\nsstot_old = np.sum((y - ybar) ** 2) #原始数据方差\n\nprint (ssreg / sstot_old) # 准确率\n\n\nplot_new1 = plt.plot(x, y_new, '*', label='original values')\nplot_new12 = plt.plot(x, yvals_new, 'r', label='polyfit values')\nplt.xlabel('year(05-15)')\nplt.ylabel('Proportion')\nplt.legend(loc=4) # 指定legend的位置\nplt.title('topic10')\nplt.show()\nplt.savefig('p1.png')\n\n\nprint(p_new)\n# # 定义函数变量x\n# x=Symbol(\"x\")\n#\n# # 对函数sin(x)求导,并且显示\n# print(diff(p_new, x))" ]
[ [ "matplotlib.pyplot.legend", "numpy.polyfit", "numpy.poly1d", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.std", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
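line.py above fits a degree-2 polynomial to the monthly topic proportions and repeatedly replaces points whose residual exceeds three standard deviations with an average of neighbouring values until the residual spread settles, per the three-sigma rule its docstring describes (in Chinese). A compact NumPy restatement of that loop under stated simplifications: it uses a symmetric neighbour average instead of the script's two-previous-points average, and the synthetic series, injected spike, and convergence test are illustrative only.

import numpy as np

def three_sigma_smooth(y, degree=2, max_iter=50):
    # Fit a low-order polynomial, replace points whose residual exceeds 3*sigma with a
    # neighbour average, and refit until the residual spread stops changing.
    x = np.arange(1, len(y) + 1, dtype=float)
    y = np.asarray(y, dtype=float).copy()
    prev_sigma = None
    for _ in range(max_iter):
        fit = np.poly1d(np.polyfit(x, y, degree))
        residuals = np.abs(fit(x) - y)
        sigma = residuals.std()
        for i in np.where(residuals > 3 * sigma)[0]:
            lo, hi = max(i - 1, 0), min(i + 1, len(y) - 1)
            y[i] = 0.5 * (y[lo] + y[hi])
        if prev_sigma is not None and np.isclose(sigma, prev_sigma):
            break
        prev_sigma = sigma
    return y, np.poly1d(np.polyfit(x, y, degree))

# Synthetic monthly topic proportions with one injected spike at index 7.
rng = np.random.default_rng(0)
series = 0.05 + 0.002 * np.arange(20) + rng.normal(0, 0.003, 20)
series[7] += 0.2
smoothed, fit = three_sigma_smooth(series)
print(smoothed[7], fit)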
Ceyron/Lattice-Boltzmann-Method-JAX
[ "f18e136e6e12fa575104053818c53b1689e50948" ]
[ "lattice_boltzmann_method_python_jax.py" ]
[ "r\"\"\"\nSolves the incompressible Navier Stokes equations using the Lattice-Boltzmann\nMethod¹. The scenario is the flow around a cylinder in 2D which yields a van\nKarman vortex street.\n\n\n periodic\n +-------------------------------------------------------------+\n | |\n | ---> |\n | |\n | ---> **** |\n | ******** | \ninflow | ---> ********** | outflow\n | ******** |\n | ---> **** |\n | |\n | ---> |\n | |\n +-------------------------------------------------------------+\n periodic\n\n-> uniform inflow profile with only horizontal velocities at left boundary\n-> outflow boundary at the right\n-> top and bottom boundary connected by periodicity\n-> the circle in the center (representing a slice from the 3d cylinder)\n uses a no-slip Boundary Condition\n-> initially, fluid is NOT at rest and has the horizontal velocity profile\n all over the domain\n\n¹ To be fully correct, LBM considers the compressible Navier-Stokes Equations.\nThis can also be seen by the fact that we have a changing macroscopic density over\nthe domain and that we actively use it throughout the computations. However, our\nflow speeds are below the 0.3 Mach limit which results in only minor density\nfluctuations. Hence, the fluid behaves almost incompressible. \n\n------\n\nSolution strategy:\n\nDiscretize the domain into a Cartesian mesh. Each grid vertex is associated\nwith 9 discrete velocities (D2Q9) and 2 macroscopic velocities. Then iterate\nover time.\n\n\n1. Apply outflow boundary condition on the right boundary\n\n2. Compute Macroscopic Quantities (density and velocities)\n\n3. Apply Inflow Profile by Zou/He Dirichlet Boundary Condition\n on the left boundary\n\n4. Compute the discrete equilibria velocities\n\n5. Perform a Collision step according to BGK (Bhatnagar–Gross–Krook)\n\n6. Apply Bounce-Back Boundary Conditions on the cylinder obstacle\n\n7. Stream alongside the lattice velocities\n\n8. Advance in time (repeat the loop)\n\n\nThe 7th step implicitly yields the periodic Boundary Conditions at\nthe top and bottom boundary.\n\n------\n\nEmployed Discretization:\n\nD2Q9 grid, i.e. 2-dim space with 9 discrete\nvelocities per node. In Other words the 2d space is discretized into\nN_x by N_y by 9 points.\n\n 6 2 5\n \\ | /\n 3 - 0 - 1\n / | \\\n 7 4 8 \n\nTherefore we have the shapes:\n\n- macroscopic velocity : (N_x, N_y, 2)\n- discrete velocity : (N_x, N_y, 9)\n- density : (N_x, N_y)\n\n\n------\n\nLattice Boltzmann Computations\n\nDensity:\n\nρ = ∑ᵢ fᵢ\n\n\nVelocities:\n\nu = 1/ρ ∑ᵢ fᵢ cᵢ\n\n\nEquilibrium:\n\nfᵢᵉ = ρ Wᵢ (1 + 3 cᵢ ⋅ u + 9/2 (cᵢ ⋅ u)² − 3/2 ||u||₂²)\n\n\nBGK Collision:\n\nfᵢ ← fᵢ − ω (fᵢ − fᵢᵉ)\n\n\nwith the following quantities:\n\nfᵢ : Discrete velocities\nfᵢᵉ : Equilibrium discrete velocities\nρ : Density\n∑ᵢ : Summation over all discrete velocities\ncᵢ : Lattice Velocities\nWᵢ : Lattice Weights\nω : Relaxation factor\n\n------\n\nThe flow configuration is defined using the Reynolds Number\n\nRe = (U R) / ν\n\nwith:\n\nRe : Reynolds Number\nU : Inflow Velocity\nR : Cylinder Radius\nν : Kinematic Viscosity\n\nCan be re-arranged in terms of the kinematic viscosity\n\nν = (U R) / Re\n\nThen the relaxation factor is computed according to\n\nω = 1 / (3 ν + 0.5)\n\n------\n\nNote that this scheme can become unstable for Reynoldsnumbers >~ 350 ²\n\n² Note that the stability of the D2Q9 scheme is mathematically not\nlinked to the Reynoldsnumber. Just use this as a reference. 
Stability\nfor this scheme is realted to the velocity magnitude.\nConsequentially, the actual limiting factor is the Mach number (the\nratio between velocity magnitude and the speed of sound).\n\n\"\"\"\nimport jax\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport cmasher as cmr\nfrom tqdm import tqdm\n\nN_ITERATIONS = 15_000\nREYNOLDS_NUMBER = 80\n\nN_POINTS_X = 300\nN_POINTS_Y = 50\n\nCYLINDER_CENTER_INDEX_X = N_POINTS_X // 5\nCYLINDER_CENTER_INDEX_Y = N_POINTS_Y // 2\nCYLINDER_RADIUS_INDICES = N_POINTS_Y // 9\n\nMAX_HORIZONTAL_INFLOW_VELOCITY = 0.04\n\nVISUALIZE = True\nPLOT_EVERY_N_STEPS = 100\nSKIP_FIRST_N_ITERATIONS = 5000\n\n\nr\"\"\"\nLBM Grid: D2Q9\n 6 2 5\n \\ | /\n 3 - 0 - 1\n / | \\\n 7 4 8 \n\"\"\"\n\nN_DISCRETE_VELOCITIES = 9\n\nLATTICE_VELOCITIES = jnp.array([\n [ 0, 1, 0, -1, 0, 1, -1, -1, 1,],\n [ 0, 0, 1, 0, -1, 1, 1, -1, -1,]\n])\n\nLATTICE_INDICES = jnp.array([\n 0, 1, 2, 3, 4, 5, 6, 7, 8,\n])\n\nOPPOSITE_LATTICE_INDICES = jnp.array([\n 0, 3, 4, 1, 2, 7, 8, 5, 6,\n])\n\nLATTICE_WEIGHTS = jnp.array([\n 4/9, # Center Velocity [0,]\n 1/9, 1/9, 1/9, 1/9, # Axis-Aligned Velocities [1, 2, 3, 4]\n 1/36, 1/36, 1/36, 1/36, # 45 ° Velocities [5, 6, 7, 8]\n])\n\nRIGHT_VELOCITIES = jnp.array([1, 5, 8])\nUP_VELOCITIES = jnp.array([2, 5, 6])\nLEFT_VELOCITIES = jnp.array([3, 6, 7])\nDOWN_VELOCITIES = jnp.array([4, 7, 8])\nPURE_VERTICAL_VELOCITIES = jnp.array([0, 2, 4])\nPURE_HORIZONTAL_VELOCITIES = jnp.array([0, 1, 3])\n\n\ndef get_density(discrete_velocities):\n density = jnp.sum(discrete_velocities, axis=-1)\n\n return density\n\ndef get_macroscopic_velocities(discrete_velocities, density):\n macroscopic_velocities = jnp.einsum(\n \"NMQ,dQ->NMd\",\n discrete_velocities,\n LATTICE_VELOCITIES,\n ) / density[..., jnp.newaxis]\n\n return macroscopic_velocities\n\ndef get_equilibrium_discrete_velocities(macroscopic_velocities, density):\n projected_discrete_velocities = jnp.einsum(\n \"dQ,NMd->NMQ\",\n LATTICE_VELOCITIES,\n macroscopic_velocities,\n )\n macroscopic_velocity_magnitude = jnp.linalg.norm(\n macroscopic_velocities,\n axis=-1,\n ord=2,\n )\n equilibrium_discrete_velocities = (\n density[..., jnp.newaxis]\n *\n LATTICE_WEIGHTS[jnp.newaxis, jnp.newaxis, :]\n *\n (\n 1\n +\n 3 * projected_discrete_velocities\n +\n 9/2 * projected_discrete_velocities**2\n -\n 3/2 * macroscopic_velocity_magnitude[..., jnp.newaxis]**2\n )\n )\n\n return equilibrium_discrete_velocities\n\ndef main():\n jax.config.update(\"jax_enable_x64\", True)\n\n kinematic_viscosity = (\n (\n MAX_HORIZONTAL_INFLOW_VELOCITY\n *\n CYLINDER_RADIUS_INDICES\n ) / (\n REYNOLDS_NUMBER\n )\n )\n relaxation_omega = (\n (\n 1.0\n ) / (\n 3.0\n *\n kinematic_viscosity\n +\n 0.5\n )\n )\n\n # Define a mesh\n x = jnp.arange(N_POINTS_X)\n y = jnp.arange(N_POINTS_Y)\n X, Y = jnp.meshgrid(x, y, indexing=\"ij\")\n\n # Obstacle Mask: An array of the shape like X or Y, but contains True if the\n # point belongs to the obstacle and False if not\n obstacle_mask = (\n jnp.sqrt(\n (\n X\n -\n CYLINDER_CENTER_INDEX_X\n )**2\n +\n (\n Y\n -\n CYLINDER_CENTER_INDEX_Y\n )**2\n )\n <\n CYLINDER_RADIUS_INDICES\n )\n\n velocity_profile = jnp.zeros((N_POINTS_X, N_POINTS_Y, 2))\n velocity_profile = velocity_profile.at[:, :, 0].set(MAX_HORIZONTAL_INFLOW_VELOCITY)\n\n @jax.jit\n def update(discrete_velocities_prev):\n # (1) Prescribe the outflow BC on the right boundary\n discrete_velocities_prev = discrete_velocities_prev.at[-1, :, LEFT_VELOCITIES].set(\n discrete_velocities_prev[-2, :, LEFT_VELOCITIES]\n )\n\n # (2) Macroscopic 
Velocities\n density_prev = get_density(discrete_velocities_prev)\n macroscopic_velocities_prev = get_macroscopic_velocities(\n discrete_velocities_prev,\n density_prev,\n )\n\n # (3) Prescribe Inflow Dirichlet BC using Zou/He scheme\n macroscopic_velocities_prev =\\\n macroscopic_velocities_prev.at[0, 1:-1, :].set(\n velocity_profile[0, 1:-1, :]\n )\n density_prev = density_prev.at[0, :].set(\n (\n get_density(discrete_velocities_prev[0, :, PURE_VERTICAL_VELOCITIES].T)\n +\n 2 *\n get_density(discrete_velocities_prev[0, :, LEFT_VELOCITIES].T)\n ) / (\n 1 - macroscopic_velocities_prev[0, :, 0]\n )\n )\n\n # (4) Compute discrete Equilibria velocities\n equilibrium_discrete_velocities = get_equilibrium_discrete_velocities(\n macroscopic_velocities_prev,\n density_prev,\n )\n\n # (3) Belongs to the Zou/He scheme\n discrete_velocities_prev = \\\n discrete_velocities_prev.at[0, :, RIGHT_VELOCITIES].set(\n equilibrium_discrete_velocities[0, :, RIGHT_VELOCITIES]\n )\n \n # (5) Collide according to BGK\n discrete_velocities_post_collision = (\n discrete_velocities_prev\n -\n relaxation_omega\n *\n (\n discrete_velocities_prev\n -\n equilibrium_discrete_velocities\n )\n )\n\n # (6) Bounce-Back Boundary Conditions to enfore the no-slip\n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_post_collision =\\\n discrete_velocities_post_collision.at[obstacle_mask, LATTICE_INDICES[i]].set(\n discrete_velocities_prev[obstacle_mask, OPPOSITE_LATTICE_INDICES[i]]\n )\n \n # (7) Stream alongside lattice velocities\n discrete_velocities_streamed = discrete_velocities_post_collision\n for i in range(N_DISCRETE_VELOCITIES):\n discrete_velocities_streamed = discrete_velocities_streamed.at[:, :, i].set(\n jnp.roll(\n jnp.roll(\n discrete_velocities_post_collision[:, :, i],\n LATTICE_VELOCITIES[0, i],\n axis=0,\n ),\n LATTICE_VELOCITIES[1, i],\n axis=1,\n )\n )\n \n return discrete_velocities_streamed\n\n\n discrete_velocities_prev = get_equilibrium_discrete_velocities(\n velocity_profile,\n jnp.ones((N_POINTS_X, N_POINTS_Y)),\n )\n\n plt.style.use(\"dark_background\")\n plt.figure(figsize=(15, 6), dpi=100)\n\n for iteration_index in tqdm(range(N_ITERATIONS)):\n discrete_velocities_next = update(discrete_velocities_prev)\n\n discrete_velocities_prev = discrete_velocities_next\n\n if iteration_index % PLOT_EVERY_N_STEPS == 0 and VISUALIZE and iteration_index > SKIP_FIRST_N_ITERATIONS:\n density = get_density(discrete_velocities_next)\n macroscopic_velocities = get_macroscopic_velocities(\n discrete_velocities_next,\n density,\n )\n velocity_magnitude = jnp.linalg.norm(\n macroscopic_velocities,\n axis=-1,\n ord=2,\n )\n d_u__d_x, d_u__d_y = jnp.gradient(macroscopic_velocities[..., 0])\n d_v__d_x, d_v__d_y = jnp.gradient(macroscopic_velocities[..., 1])\n curl = (d_u__d_y - d_v__d_x)\n\n # Velocity Magnitude Contour Plot in the top\n plt.subplot(211)\n plt.contourf(\n X,\n Y,\n velocity_magnitude,\n levels=50,\n cmap=cmr.amber,\n )\n plt.colorbar().set_label(\"Velocity Magnitude\")\n plt.gca().add_patch(plt.Circle(\n (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),\n CYLINDER_RADIUS_INDICES,\n color=\"darkgreen\",\n ))\n\n # Vorticity Magnitude Contour PLot in the bottom\n plt.subplot(212)\n plt.contourf(\n X,\n Y, \n curl,\n levels=50,\n cmap=cmr.redshift,\n vmin=-0.02,\n vmax= 0.02,\n )\n plt.colorbar().set_label(\"Vorticity Magnitude\")\n plt.gca().add_patch(plt.Circle(\n (CYLINDER_CENTER_INDEX_X, CYLINDER_CENTER_INDEX_Y),\n CYLINDER_RADIUS_INDICES,\n color=\"darkgreen\",\n ))\n\n plt.draw()\n 
plt.pause(0.0001)\n plt.clf()\n \n if VISUALIZE:\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.contourf", "matplotlib.pyplot.pause", "matplotlib.pyplot.draw", "matplotlib.pyplot.Circle", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.pyplot.clf", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
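The docstring of the lattice-Boltzmann script above states the D2Q9 relations: density is the sum of the discrete populations, velocity is their first moment divided by density, the equilibrium follows the usual second-order expansion in c_i . u, and BGK relaxes each population toward that equilibrium with factor omega = 1 / (3 nu + 0.5). A single-node NumPy sketch of exactly those formulas; the starting population, the small rightward push, and nu = 0.02 are arbitrary test values, not settings from the script.

import numpy as np

# D2Q9 lattice constants, in the same ordering as the script above.
C = np.array([[0, 1, 0, -1, 0, 1, -1, -1, 1],
              [0, 0, 1, 0, -1, 1, 1, -1, -1]])           # lattice velocities, shape (2, 9)
W = np.array([4/9] + [1/9]*4 + [1/36]*4)                  # lattice weights

def bgk_collide(f, omega):
    rho = f.sum()                                         # rho = sum_i f_i
    u = (C @ f) / rho                                     # u = (1/rho) sum_i f_i c_i
    cu = C.T @ u                                          # projections c_i . u
    f_eq = rho * W * (1 + 3*cu + 4.5*cu**2 - 1.5*(u @ u)) # second-order equilibrium
    return f - omega * (f - f_eq)                         # BGK relaxation

omega = 1.0 / (3 * 0.02 + 0.5)      # omega = 1 / (3 nu + 0.5), with nu = 0.02 as a test value
f = np.full(9, 1/9)                 # uniform population
f[1] += 0.01                        # small rightward push
print(bgk_collide(f, omega))

# Summing the returned populations reproduces the pre-collision density, the mass conservation
# that the docstring's compressibility footnote relies on.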
take2rohit/monk_v1
[ "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92", "9c567bf2c8b571021b120d879ba9edf7751b9f92" ]
[ "monk/system_unit_tests/keras/test_block_squeezenet_fire.py", "monk/pip_unit_tests/pytorch/test_block_inception_d.py", "monk/system_unit_tests/pytorch/test_loss_l1.py", "monk/pip_unit_tests/keras/test_layer_global_average_pooling2d.py", "monk/pip_unit_tests/pytorch/test_activation_rrelu.py", "monk/pip_unit_tests/keras/test_layer_average_pooling1d.py", "monk/pip_unit_tests/keras/test_optimizer_adam.py", "monk/pip_unit_tests/keras/test_optimizer_rmsprop.py", "monk/system_unit_tests/keras/test_optimizer_adamax.py", "monk/system_unit_tests/pytorch/test_loss_hinge.py", "monk/pip_unit_tests/keras/test_activation_tanh.py", "monk/system_unit_tests/keras/test_loss_binary_crossentropy.py", "monk/pip_unit_tests/gluon/test_layer_concatenate.py" ]
[ "import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom keras_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nimport numpy as np\n\n\ndef test_block_squeezenet_fire(system_dict):\n forward = True;\n\n test = \"test_block_squeezenet_fire\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.squeezenet_fire_block(squeeze_channels=16, expand_channels_1x1=32, expand_channels_3x3=64));\n gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.pytorch_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.pytorch.common import print_start\nfrom monk.pip_unit_tests.pytorch.common import print_status\n\nimport torch\nimport numpy as np\nfrom monk.pytorch.losses.return_loss import load_loss\n\n\ndef test_block_inception_d(system_dict):\n forward = True;\n\n test = \"test_block_inception_d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.inception_d_block(pool_type=\"avg\"));\n network.append(gtf.inception_d_block(pool_type=\"max\"));\n gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);\n\n x = torch.randn(1, 1, 64, 64);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\ndef test_loss_l1(system_dict):\n forward = True;\n\n test = \"test_loss_l1\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n label = torch.randn(1, 5);\n\n y = torch.randn(1, 5);\n\n gtf.loss_l1();\n load_loss(gtf.system_dict);\n loss_obj = gtf.system_dict[\"local\"][\"criterion\"];\n loss_val = loss_obj(y, label); \n\n 
system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.keras_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.keras.common import print_start\nfrom monk.pip_unit_tests.keras.common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nimport numpy as np\n\n\ndef test_layer_global_average_pooling2d(system_dict):\n forward = True;\n\n test = \"test_layer_global_average_pooling2d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.global_average_pooling2d());\n gtf.Compile_Network(network, data_shape=(3, 32, 32), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.pytorch_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.pytorch.common import print_start\nfrom monk.pip_unit_tests.pytorch.common import print_status\n\nimport torch\nimport numpy as np\nfrom monk.pytorch.losses.return_loss import load_loss\n\n\ndef test_activation_rrelu(system_dict):\n forward = True;\n\n test = \"test_activation_rrelu\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.rrelu());\n gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False);\n\n x = torch.randn(1, 3, 64, 64);\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.keras_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.keras.common import print_start\nfrom monk.pip_unit_tests.keras.common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nimport numpy as np\n\n\ndef test_layer_average_pooling1d(system_dict):\n forward = True;\n\n test = \"test_layer_average_pooling1d\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = 
prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.average_pooling1d(kernel_size=3));\n gtf.Compile_Network(network, data_shape=(3, 32), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 32, 3))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.keras_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.keras.common import print_start\nfrom monk.pip_unit_tests.keras.common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.enable_v2_behavior()\n\n\ndef test_optimizer_adam(system_dict):\n forward = True;\n if(not os.path.isdir(\"datasets\")):\n os.system(\"! wget --load-cookies /tmp/cookies.txt \\\"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\\\" -O datasets.zip && rm -rf /tmp/cookies.txt\")\n os.system(\"! unzip -qq datasets.zip\")\n\n test = \"test_optimizer_adam\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n gtf.Default(dataset_path=\"datasets/dataset_cats_dogs_train\", \n model_name=\"resnet50\", freeze_base_network=True, num_epochs=2);\n gtf.optimizer_adam(0.01, weight_decay=0.0001, beta1=0.9, beta2=0.999, \n \tclipnorm=1.0, clipvalue=0.5);\n gtf.Train();\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.keras_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.keras.common import print_start\nfrom monk.pip_unit_tests.keras.common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.enable_v2_behavior()\n \ndef test_optimizer_rmsprop(system_dict):\n forward = True;\n if(not os.path.isdir(\"datasets\")):\n os.system(\"! wget --load-cookies /tmp/cookies.txt \\\"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\\\" -O datasets.zip && rm -rf /tmp/cookies.txt\")\n os.system(\"! 
unzip -qq datasets.zip\")\n\n test = \"test_optimizer_rmsprop\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n gtf.Default(dataset_path=\"datasets/dataset_cats_dogs_train\", \n model_name=\"resnet50\", freeze_base_network=True, num_epochs=2);\n gtf.optimizer_rmsprop(0.01, weight_decay=0.0001, decay_rate=0.9, \n clipnorm=1.0, clipvalue=0.5);\n gtf.Train();\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom keras_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.enable_v2_behavior()\n\ndef test_optimizer_adamax(system_dict):\n forward = True;\n if(not os.path.isdir(\"datasets\")):\n os.system(\"! wget --load-cookies /tmp/cookies.txt \\\"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\\\" -O datasets.zip && rm -rf /tmp/cookies.txt\")\n os.system(\"! 
unzip -qq datasets.zip\")\n\n test = \"test_optimizer_adamax\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n gtf.Default(dataset_path=\"datasets/dataset_cats_dogs_train\", \n model_name=\"resnet50\", freeze_base_network=True, num_epochs=2);\n gtf.optimizer_adamax(0.01, weight_decay=0.0001, beta1=0.9, beta2=0.999, \n \tclipnorm=1.0, clipvalue=0.5);\n gtf.Train();\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom pytorch_prototype import prototype\nfrom compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport torch\nimport numpy as np\nfrom pytorch.losses.return_loss import load_loss\n\n\n\n\ndef test_loss_hinge(system_dict):\n forward = True;\n\n test = \"test_loss_hinge\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n label = torch.randn(1, 5);\n\n y = torch.randn(1, 5);\n\n gtf.loss_hinge();\n load_loss(gtf.system_dict);\n loss_obj = gtf.system_dict[\"local\"][\"criterion\"];\n loss_val = loss_obj(y, label); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.keras_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.keras.common import print_start\nfrom monk.pip_unit_tests.keras.common import print_status\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.disable_v2_behavior()\nimport numpy as np\n\n\ndef test_activation_tanh(system_dict):\n forward = True;\n\n test = \"test_activation_tanh\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.tanh());\n gtf.Compile_Network(network, data_shape=(3, 32, 32), use_gpu=False);\n\n x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))\n y = gtf.system_dict[\"local\"][\"model\"](x); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\nsys.path.append(\"../../../../monk_v1/\");\nsys.path.append(\"../../../monk/\");\nimport psutil\n\nfrom keras_prototype import prototype\nfrom 
compare_prototype import compare\nfrom common import print_start\nfrom common import print_status\n\nimport numpy as np\nfrom tf_keras_1.losses.return_loss import load_loss\nfrom keras import backend as K\n\nimport tensorflow as tf\nif(tf.__version__[0] == '2'):\n import tensorflow.compat.v1 as tf\n tf.enable_v2_behavior()\n\ndef test_loss_binary_crossentropy(system_dict):\n forward = True;\n\n test = \"test_loss_binary_crossentropy\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n y = np.random.randn(1, 5);\n label = np.random.randn(1, 5);\n\n y = K.constant(y);\n label = K.constant(label);\n\n gtf.loss_binary_crossentropy();\n load_loss(gtf.system_dict);\n loss_obj = gtf.system_dict[\"local\"][\"criterion\"];\n loss_val = loss_obj(label, y); \n\n system_dict[\"successful_tests\"] += 1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n", "import os\nimport sys\n\nimport psutil\n\nfrom monk.gluon_prototype import prototype\nfrom monk.compare_prototype import compare\nfrom monk.pip_unit_tests.gluon.common import print_start\nfrom monk.pip_unit_tests.gluon.common import print_status\n\nimport mxnet as mx\nimport numpy as np\nfrom monk.gluon.losses.return_loss import load_loss\n\n\ndef test_layer_concatenate(system_dict):\n forward = True;\n\n test = \"test_layer_concatenate\";\n system_dict[\"total_tests\"] += 1;\n print_start(test, system_dict[\"total_tests\"])\n if(forward):\n try:\n gtf = prototype(verbose=0);\n gtf.Prototype(\"sample-project-1\", \"sample-experiment-1\");\n\n\n network = [];\n network.append(gtf.convolution(output_channels=16, uid=\"conv1\"));\n network.append(gtf.batch_normalization(uid=\"bn1\"));\n network.append(gtf.relu(uid=\"relu1\"));\n network.append(gtf.max_pooling(uid=\"pool1\"));\n\n\n\n subnetwork = [];\n branch1 = [];\n branch1.append(gtf.convolution(output_channels=16, uid=\"conv3_1_1\"));\n branch1.append(gtf.batch_normalization(uid=\"bn3_1_1\"));\n branch1.append(gtf.convolution(output_channels=16, uid=\"conv3_1_2\"));\n branch1.append(gtf.batch_normalization(uid=\"bn3_1_2\"));\n\n branch2 = [];\n branch2.append(gtf.convolution(output_channels=16, uid=\"conv3_2_1\"));\n branch2.append(gtf.batch_normalization(uid=\"bn3_2_1\"));\n\n branch3 = [];\n branch3.append(gtf.identity(uid=\"identity1\"))\n\n subnetwork.append(branch1);\n subnetwork.append(branch2);\n subnetwork.append(branch3);\n subnetwork.append(gtf.concatenate(uid=\"concat1\"))\n\n\n network.append(subnetwork);\n\n\n\n network.append(gtf.convolution(output_channels=16, uid=\"conv4\"));\n network.append(gtf.batch_normalization(uid=\"bn4\"));\n network.append(gtf.relu(uid=\"relu3\"));\n network.append(gtf.max_pooling(uid=\"pool4\"));\n\n network.append(gtf.flatten(uid=\"flatten1\"));\n network.append(gtf.fully_connected(units=1024, uid=\"fc1\"));\n network.append(gtf.dropout(drop_probability=0.2, uid=\"dp1\"));\n network.append(gtf.fully_connected(units=2, uid=\"fc2\"));\n\n\n gtf.Compile_Network(network, use_gpu=False);\n\n x = np.random.rand(1, 1, 64, 64);\n x = mx.nd.array(x);\n y = gtf.system_dict[\"local\"][\"model\"].forward(x); \n\n system_dict[\"successful_tests\"] += 
1;\n print_status(\"Pass\");\n\n except Exception as e:\n system_dict[\"failed_tests_exceptions\"].append(e);\n system_dict[\"failed_tests_lists\"].append(test);\n forward = False;\n print_status(\"Fail\");\n else:\n system_dict[\"skipped_tests_lists\"].append(test);\n print_status(\"Skipped\");\n\n return system_dict\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.disable_v2_behavior" ], [ "torch.randn" ], [ "torch.randn" ], [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.disable_v2_behavior" ], [ "torch.randn" ], [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.disable_v2_behavior" ], [ "tensorflow.compat.v1.enable_v2_behavior" ], [ "tensorflow.compat.v1.enable_v2_behavior" ], [ "tensorflow.compat.v1.enable_v2_behavior" ], [ "torch.randn" ], [ "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.disable_v2_behavior" ], [ "tensorflow.compat.v1.enable_v2_behavior", "numpy.random.randn" ], [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wangqingyu985/OpenStereo
[ "91d605357d65281b99b0d8cf45e3f15f0543c9fa", "91d605357d65281b99b0d8cf45e3f15f0543c9fa" ]
[ "models/CFPNet/submodule.py", "GFLOPs/HSMNet_GFLOPs/loss.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_planes)\n )\n\n\ndef convbn_3d(in_planes, out_planes, kernel_size, stride, pad):\n return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride, bias=False),\n nn.BatchNorm3d(out_planes)\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, kernel_size, stride, downsample, pad, dilation):\n super(BasicBlock, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(inplanes, planes, kernel_size, stride, pad, dilation),\n nn.ReLU(inplace=True)\n )\n self.conv2 = convbn(planes, planes, kernel_size, 1, pad, dilation)\n self.stride = stride\n self.downsample = downsample\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n if self.downsample is not None:\n x = self.downsample(x)\n out += x\n return out\n\n\nclass matchshifted(nn.Module):\n def __init__(self):\n super(matchshifted, self).__init__()\n\n def forward(self, left, right, shift):\n batch, filters, height, width = left.size()\n shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))\n shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width-shift)])).cuda()), (shift, 0, 0, 0))\n out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters*2, 1, height, width)\n return out\n\n\nclass disparityregression(nn.Module):\n def __init__(self, maxdisp):\n super(disparityregression, self).__init__()\n self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)\n\n def forward(self, x):\n disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])\n out = torch.sum(x*disp, 1)\n return out\n\n\nclass feature_extraction(nn.Module):\n def __init__(self):\n super(feature_extraction, self).__init__()\n self.inplanes = 32\n self.layer0 = nn.Sequential(convbn(in_planes=3, out_planes=32, kernel_size=3, stride=1, pad=1, dilation=1),\n nn.ReLU(inplace=True)\n )\n self.layer1 = self._make_layer(block=BasicBlock, planes=32, blocks=3, kernel_size=3, stride=2, pad=1, dilation=1, order=1)\n self.layer2 = self._make_layer(BasicBlock, 64, 8, 3, 2, 1, 1, 1)\n self.layer3 = self._make_layer(BasicBlock, 128, 3, 3, 2, 1, 1, 2)\n\n self.layer1_after = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer2_after = nn.Sequential(convbn(32, 64, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer3_after = nn.Sequential(convbn(64, 128, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n self.layer1_final = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat1 = nn.Sequential(convbn(128, 32, 3, 1, 1, 32),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat2 = nn.Sequential(convbn(128, 32, 3, 1, 1, 16),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.dilat3 = nn.Sequential(convbn(128, 32, 3, 1, 1, 8),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 4),\n nn.ReLU(inplace=True))\n\n self.dilat4 = 
nn.Sequential(convbn(128, 32, 3, 1, 1, 6),\n nn.ReLU(inplace=True),\n convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n\n self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),\n convbn(128, 32, 1, 1, 0, 1),\n nn.ReLU(inplace=True))\n\n self.concat_dilate_pool = nn.Sequential(convbn(64, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n self.lastconv = nn.Sequential(convbn(352, 128, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))\n\n def _make_layer(self, block, planes, blocks, kernel_size, stride, pad, dilation, order):\n downsample = None\n if stride != 1:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes * order, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),)\n layers = []\n layers.append(block(self.inplanes*order, planes, kernel_size, stride, downsample, pad, dilation))\n if blocks != 1:\n for i in range(1, blocks):\n layers.append(block(planes, planes, kernel_size, 1, None, pad, dilation))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out_0 = self.layer0(x)\n out_1 = self.layer1(out_0)\n out_1_a = self.layer1_after(out_0)\n out_1 = out_1 + out_1_a\n out_2 = self.layer2(out_1)\n out_2_a = self.layer2_after(out_1)\n out_2 = out_2 + out_2_a\n out_3 = self.layer3(out_2)\n out_3_a = self.layer3_after(out_2)\n out_3 = out_3 + out_3_a\n out_1 = self.layer1_final(out_1)\n inPooling = F.upsample(out_3, (out_2.size()[2], out_2.size()[3]), mode='bilinear')\n #Pooling \n output_dilate1 = self.dilat1(inPooling)\n output_dilate2 = self.dilat2(inPooling)\n output_dilate3 = self.dilat3(inPooling)\n output_dilate4 = self.dilat4(inPooling)\n\n output_branch1 = self.branch1(inPooling)\n output_branch1 = F.upsample(output_branch1, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch2 = self.branch2(inPooling)\n output_branch2 = F.upsample(output_branch2, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch3 = self.branch3(inPooling)\n output_branch3 = F.upsample(output_branch3, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n output_branch4 = self.branch4(inPooling)\n output_branch4 = F.upsample(output_branch4, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')\n\n #concat dilate and avgpool\n out_fusion1 = torch.cat((output_dilate1, output_branch1), 1)\n out_fusion1 = self.concat_dilate_pool(out_fusion1)\n\n out_fusion2 = torch.cat((output_dilate2, output_branch2), 1)\n out_fusion2 = self.concat_dilate_pool(out_fusion2)\n\n out_fusion3 = torch.cat((output_dilate3, output_branch3), 1)\n out_fusion3 = self.concat_dilate_pool(out_fusion3)\n\n out_fusion4 = torch.cat((output_dilate4, output_branch4), 1)\n out_fusion4 = self.concat_dilate_pool(out_fusion4)\n\n output_feature = torch.cat((out_1, out_2, inPooling, out_fusion1, out_fusion2, out_fusion3, out_fusion4), 1)\n output_feature = self.lastconv(output_feature)\n\n return output_feature\n", "import torch.nn.functional as F\nimport torch.nn as nn\n\n\nclass 
SmoothL1LossHSM(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n def forward(self, stacked, target, mask):\n loss = (64. / 85) * F.smooth_l1_loss(stacked[0][mask], target[mask], size_average=True) + \\\n (16. / 85) * F.smooth_l1_loss(stacked[1][mask], target[mask], size_average=True) + \\\n (4. / 85) * F.smooth_l1_loss(stacked[2][mask], target[mask], size_average=True) + \\\n (1. / 85) * F.smooth_l1_loss(stacked[3][mask], target[mask], size_average=True)\n return loss\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.Conv2d", "torch.sum", "torch.nn.Conv3d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.BatchNorm3d" ], [ "torch.nn.functional.smooth_l1_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vincentadam87/SVGPs
[ "0de1194bf0f24997148dfce0cd6fbffae16fb3bc" ]
[ "SVGPs/functions.py" ]
[ "# Copyright 2016 James Hensman, alexggmatthews\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# ------------------------------------------\n# Modification notice:\n# This file was modified by Vincent ADAM\n# ------------------------------------------\n\nimport tensorflow as tf\nfrom settings import float_type\nfrom quadrature import hermgauss\nimport numpy as np\n\n\ndef eye(N):\n \"\"\"\n An identitiy matrix\n \"\"\"\n return tf.diag(tf.ones(tf.stack([N, ]), dtype=float_type))\n\n\ndef variational_expectations( Fmu, Fvar, phi, num_gauss_hermite_points=20):\n \"\"\"\n Compute the expected value of a function phi, given a Gaussian\n distribution for the input values.\n if\n q(f) = N(Fmu, Fvar)\n then this method computes\n \\int phi(f) q(f) df.\n Here, we implement a default Gauss-Hermite quadrature routine\n \"\"\"\n gh_x, gh_w = hermgauss(num_gauss_hermite_points)\n gh_x = gh_x.reshape(1, -1)\n gh_w = gh_w.reshape(-1, 1) / np.sqrt(np.pi)\n shape = tf.shape(Fmu)\n Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]\n X = gh_x * tf.sqrt(2.0 * Fvar) + Fmu\n logp = phi(X)\n return tf.reshape(tf.matmul(logp, gh_w), shape)\n\n\n\nimport tensorflow as tf\n\ndef block_diagonal(matrices, dtype=tf.float32):\n \"\"\"Constructs block-diagonal matrices from a list of batched 2D tensors.\n Args:\n matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of\n matrices with the same batch dimension).\n dtype: Data type to use. The Tensors in `matrices` must match this dtype.\n Returns:\n A matrix with the input matrices stacked along its main diagonal, having\n shape [..., \\sum_i N_i, \\sum_i M_i].\n \n \"\"\"\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]],\n axis=0)))\n blocked = tf.concat(row_blocks, -2)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.TensorShape", "tensorflow.matmul", "tensorflow.concat", "numpy.sqrt", "tensorflow.shape", "tensorflow.Dimension", "tensorflow.stack", "tensorflow.reshape", "tensorflow.rank", "tensorflow.sqrt", "tensorflow.add_n" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
ttung/napari
[ "fa97a05b763dacc71d4c47e6b4b2a97c208e3551" ]
[ "napari/components/add_layers_mixin.py" ]
[ "import itertools\nimport numpy as np\n\nfrom .. import layers\nfrom ..utils import colormaps\nfrom ..utils.misc import ensure_iterable, is_iterable\nfrom ..utils import io\n\n\nclass AddLayersMixin:\n \"\"\"A mixin that adds add_* methods for adding layers to the ViewerModel.\n\n Each method corresponds to adding one or more layers to the viewer.\n Methods that just add a single layer contain the keyword arguments and\n copies of the documentation from that the layer. These are copied and\n pasted instead of being autogenerated because IDEs like PyCharm parse the\n source code for docs instead of pulling it up dynamically.\n\n These methods are separated into a mixin to keep the ViewerModel class\n easier to read and make these methods easier to maintain.\n \"\"\"\n\n def add_layer(self, layer):\n \"\"\"Add a layer to the viewer.\n\n Parameters\n ----------\n layer : napari.layers.Layer\n Layer to add.\n \"\"\"\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n\n def add_image(\n self,\n data=None,\n *,\n channel_axis=None,\n rgb=None,\n is_pyramid=None,\n colormap=None,\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n iso_threshold=0.5,\n attenuation=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending=None,\n visible=True,\n path=None,\n ):\n \"\"\"Add an image layer to the layers list.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n an image pyramid.\n channel_axis : int, optional\n Axis to expand image along.\n rgb : bool\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be a pyramid. The first image in the list\n should be the largest.\n colormap : str, vispy.Color.Colormap, tuple, dict, list\n Colormaps to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap. 
If a list then must be same length as the axis that is\n being expanded as channels, and each colormap is applied to each\n new image layer.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image. If list of lists then must be same length as the axis\n that is being expanded and then each colormap is applied to each\n image.\n gamma : list, float\n Gamma correction for determining colormap linearity. Defaults to 1.\n If a list then must be same length as the axis that is being\n expanded and then each entry in the list is applied to each image.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n path : str or list of str\n Path or list of paths to image data. Paths can be passed as strings\n or `pathlib.Path` instances.\n\n Returns\n -------\n layer : :class:`napari.layers.Image` or list\n The newly-created image layer or list of image layers.\n \"\"\"\n if data is None and path is None:\n raise ValueError(\"One of either data or path must be provided\")\n elif data is not None and path is not None:\n raise ValueError(\"Only one of data or path can be provided\")\n elif data is None:\n data = io.magic_imread(path)\n\n if channel_axis is None:\n if colormap is None:\n colormap = 'gray'\n if blending is None:\n blending = 'translucent'\n layer = layers.Image(\n data,\n rgb=rgb,\n is_pyramid=is_pyramid,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n interpolation=interpolation,\n rendering=rendering,\n iso_threshold=iso_threshold,\n attenuation=attenuation,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n else:\n if is_pyramid:\n n_channels = data[0].shape[channel_axis]\n else:\n n_channels = data.shape[channel_axis]\n\n name = ensure_iterable(name)\n\n if blending is None:\n blending = 'additive'\n\n if colormap is None:\n if n_channels < 3:\n colormap = colormaps.MAGENTA_GREEN\n else:\n colormap = itertools.cycle(colormaps.CYMRGB)\n else:\n colormap = ensure_iterable(colormap)\n\n # If one pair of clim values is passed then need to iterate them to\n # all layers.\n if contrast_limits is not None and not is_iterable(\n contrast_limits[0]\n ):\n contrast_limits = itertools.repeat(contrast_limits)\n else:\n contrast_limits = ensure_iterable(contrast_limits)\n\n gamma = ensure_iterable(gamma)\n\n layer_list = []\n zipped_args = zip(\n range(n_channels), colormap, contrast_limits, gamma, name\n )\n for i, cmap, clims, _gamma, name in zipped_args:\n if is_pyramid:\n image = [\n np.take(data[j], i, axis=channel_axis)\n for j in 
range(len(data))\n ]\n else:\n image = np.take(data, i, axis=channel_axis)\n layer = layers.Image(\n image,\n rgb=rgb,\n colormap=cmap,\n contrast_limits=clims,\n gamma=_gamma,\n interpolation=interpolation,\n rendering=rendering,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n layer_list.append(layer)\n return layer_list\n\n def add_points(\n self,\n data=None,\n *,\n properties=None,\n symbol='o',\n size=10,\n edge_width=1,\n edge_color='black',\n edge_color_cycle=None,\n edge_colormap='viridis',\n edge_contrast_limits=None,\n face_color='white',\n face_color_cycle=None,\n face_colormap='viridis',\n face_contrast_limits=None,\n n_dimensional=False,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a points layer to the layers list.\n\n Parameters\n ----------\n data : array (N, D)\n Coordinates for N points in D dimensions.\n properties : dict {str: array (N,)}, DataFrame\n Properties for each point. Each property should be an array of length N,\n where N is the number of points.\n symbol : str\n Symbol to be used for the point markers. Must be one of the\n following: arrow, clobber, cross, diamond, disc, hbar, ring,\n square, star, tailed_arrow, triangle_down, triangle_up, vbar, x.\n size : float, array\n Size of the point marker. If given as a scalar, all points are made\n the same size. If given as an array, size must be the same\n broadcastable to the same shape as the data.\n edge_width : float\n Width of the symbol edge in pixels.\n edge_color : str, array-like\n Color of the point marker border. Numeric color values should be RGB(A).\n edge_color_cycle : np.ndarray, list, cycle\n Cycle of colors (provided as RGBA) to map to edge_color if a\n categorical attribute is used to set face_color.\n edge_colormap : str, vispy.color.colormap.Colormap\n Colormap to set edge_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n edge_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. If set the none, the clims will be set to\n (property.min(), property.max())\n face_color : str, array-like\n Color of the point marker body. Numeric color values should be RGB(A).\n face_color_cycle : np.ndarray, list, cycle\n Cycle of colors (provided as RGBA) to map to face_color if a\n categorical attribute is used to set face_color.\n face_colormap : str, vispy.color.colormap.Colormap\n Colormap to set face_color if a continuous attribute is used to set face_color.\n See vispy docs for details: http://vispy.org/color.html#vispy.color.Colormap\n face_contrast_limits : None, (float, float)\n clims for mapping the property to a color map. These are the min and max value\n of the specified property that are mapped to 0 and 1, respectively.\n The default value is None. 
If set the none, the clims will be set to\n (property.min(), property.max())\n n_dimensional : bool\n If True, renders points not just in central plane but also in all\n n-dimensions according to specified point marker size.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Points`\n The newly-created points layer.\n\n Notes\n -----\n See vispy's marker visual docs for more details:\n http://api.vispy.org/en/latest/visuals.html#vispy.visuals.MarkersVisual\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty([0, ndim])\n\n layer = layers.Points(\n data=data,\n properties=properties,\n symbol=symbol,\n size=size,\n edge_width=edge_width,\n edge_color=edge_color,\n edge_color_cycle=edge_color_cycle,\n edge_colormap=edge_colormap,\n edge_contrast_limits=edge_contrast_limits,\n face_color=face_color,\n face_color_cycle=face_color_cycle,\n face_colormap=face_colormap,\n face_contrast_limits=face_contrast_limits,\n n_dimensional=n_dimensional,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_labels(\n self,\n data=None,\n *,\n is_pyramid=None,\n num_colors=50,\n seed=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n path=None,\n ):\n \"\"\"Add a labels (or segmentation) layer to the layers list.\n\n An image-like layer where every pixel contains an integer ID\n corresponding to the region it belongs to.\n\n Parameters\n ----------\n data : array or list of array\n Labels data as an array or pyramid.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be a pyramid. The first image in the list\n should be the largest.\n num_colors : int\n Number of unique colors to use in colormap.\n seed : float\n Seed for colormap random generator.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n path : str or list of str\n Path or list of paths to image data. 
Paths can be passed as strings\n or `pathlib.Path` instances.\n\n Returns\n -------\n layer : :class:`napari.layers.Labels`\n The newly-created labels layer.\n \"\"\"\n if data is None and path is None:\n raise ValueError(\"One of either data or path must be provided\")\n elif data is not None and path is not None:\n raise ValueError(\"Only one of data or path can be provided\")\n elif data is None:\n data = io.magic_imread(path)\n\n layer = layers.Labels(\n data,\n is_pyramid=is_pyramid,\n num_colors=num_colors,\n seed=seed,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_shapes(\n self,\n data=None,\n *,\n shape_type='rectangle',\n edge_width=1,\n edge_color='black',\n face_color='white',\n z_index=0,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a shapes layer to the layers list.\n\n Parameters\n ----------\n data : list or array\n List of shape data, where each element is an (N, D) array of the\n N vertices of a shape in D dimensions. Can be an 3-dimensional\n array if each shape has the same number of vertices.\n shape_type : string or list\n String of shape shape_type, must be one of \"{'line', 'rectangle',\n 'ellipse', 'path', 'polygon'}\". If a list is supplied it must be\n the same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_width : float or list\n Thickness of lines and edges. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n edge_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n face_color : str or list\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements. If a list is supplied it must be the same length as\n the length of `data` and each element will be applied to each shape\n otherwise the same value will be used for all shapes.\n z_index : int or list\n Specifier of z order priority. Shapes with higher z order are\n displayed ontop of others. If a list is supplied it must be the\n same length as the length of `data` and each element will be\n applied to each shape otherwise the same value will be used for all\n shapes.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float or list\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. 
Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Shapes`\n The newly-created shapes layer.\n \"\"\"\n if data is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty((0, 0, ndim))\n\n layer = layers.Shapes(\n data=data,\n shape_type=shape_type,\n edge_width=edge_width,\n edge_color=edge_color,\n face_color=face_color,\n z_index=z_index,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_surface(\n self,\n data,\n *,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a surface layer to the layers list.\n\n Parameters\n ----------\n data : 3-tuple of array\n The first element of the tuple is an (N, D) array of vertices of\n mesh triangles. The second is an (M, 3) array of int of indices\n of the mesh triangles. The third element is the (K0, ..., KL, N)\n array of values used to color vertices where the additional L\n dimensions are used to color the same mesh with different values.\n colormap : str, vispy.Color.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Surface`\n The newly-created surface layer.\n \"\"\"\n layer = layers.Surface(\n data,\n colormap=colormap,\n contrast_limits=contrast_limits,\n gamma=gamma,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def add_vectors(\n self,\n data,\n *,\n edge_width=1,\n edge_color='red',\n length=1,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ):\n \"\"\"Add a vectors layer to the layers list.\n\n Parameters\n ----------\n data : (N, 2, D) or (N1, N2, ..., ND, D) array\n An (N, 2, D) array is interpreted as \"coordinate-like\" data and a\n list of N vectors with start point and projections of the vector in\n D dimensions. 
An (N1, N2, ..., ND, D) array is interpreted as\n \"image-like\" data where there is a length D vector of the\n projections at each pixel.\n edge_width : float\n Width for all vectors in pixels.\n length : float\n Multiplicative factor on projections for length of all vectors.\n edge_color : str\n Edge color of all the vectors.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n Returns\n -------\n layer : :class:`napari.layers.Vectors`\n The newly-created vectors layer.\n \"\"\"\n layer = layers.Vectors(\n data,\n edge_width=edge_width,\n edge_color=edge_color,\n length=length,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer\n\n def _add_layer_from_data(\n self, data, meta: dict = None, layer_type: str = 'image'\n ):\n \"\"\"Add arbitrary layer data to the viewer.\n\n Primarily intended for usage by reader plugin hooks.\n\n Parameters\n ----------\n data : Any\n Data in a format that is valid for the corresponding `add_*` method\n of the specified ``layer_type``.\n meta : dict, optional\n Dict of keyword arguments that will be passed to the corresponding\n `add_*` method. MUST NOT contain any keyword arguments that are\n not valid for the corresponding method.\n layer_type : str\n Type of layer to add. MUST have a corresponding add_* method on\n on the viewer instance.\n\n Raises\n ------\n ValueError\n If ``layer_type`` is not one of the recognized layer types.\n TypeError\n If any keyword arguments in ``meta`` are unexpected for the\n corresponding `add_*` method for this layer_type.\n\n Examples\n --------\n A typical use case might be to upack a tuple of layer data with a\n specified layer_type.\n\n >>> viewer = napari.Viewer()\n >>> data = (\n ... np.random.random((10, 2)) * 20,\n ... {'face_color': 'blue'},\n ... 'points',\n ... )\n >>> viewer._add_layer_from_data(*data)\n\n \"\"\"\n\n layer_type = layer_type.lower()\n if layer_type not in layers.NAMES:\n raise ValueError(\n f\"Unrecognized layer_type: '{layer_type}'. \"\n f\"Must be one of: {layers.NAMES}.\"\n )\n\n try:\n add_method = getattr(self, 'add_' + layer_type)\n except AttributeError:\n raise NotImplementedError(\n f\"Sorry! {layer_type} is a valid layer type, but there is no \"\n f\"viewer.add_{layer_type} available yet.\"\n )\n\n try:\n add_method(data, **(meta or {}))\n except TypeError as exc:\n if 'unexpected keyword argument' in str(exc):\n bad_key = str(exc).split('keyword argument ')[-1]\n raise TypeError(\n \"_add_layer_from_data received an unexpected keyword \"\n f\"argument ({bad_key}) for layer type {layer_type}\"\n ) from exc\n" ]
[ [ "numpy.take", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
askerlee/craft
[ "921a47a4e81017e5baf49c2823958cf86a0c1fc2" ]
[ "core/gma.py" ]
[ "import torch\nfrom torch import nn, einsum\nfrom einops import rearrange\n\n# max_pos_size = 160\nclass RelPosEmb(nn.Module):\n def __init__(\n self,\n max_pos_size,\n dim_head\n ):\n super().__init__()\n self.rel_height = nn.Embedding(2 * max_pos_size - 1, dim_head)\n self.rel_width = nn.Embedding(2 * max_pos_size - 1, dim_head)\n\n deltas = torch.arange(max_pos_size).view(1, -1) - torch.arange(max_pos_size).view(-1, 1)\n # rel_ind[i, j] = j - i + 159.\n rel_ind = deltas + max_pos_size - 1\n self.register_buffer('rel_ind', rel_ind)\n\n def forward(self, q):\n # q: [8, 1, 46, 62, 128]\n batch, heads, h, w, c = q.shape\n # self.rel_ind[:h, :h]: [46, 46]\n # self.rel_ind[:w, :w]: [62, 62]\n # rel_ind[i,j] = j - i + 159, precomputed distance between i, j. \n # This assumes the input x (from which q is derived) is precisely on the grid.\n # This is fine when we do self-attention on x.\n # However, it will be somewhat limiting if we use RelPosEmb on cross-attention between two frames, \n # particularly when we use flow_init != 0 (on sintel), \n # we better get the positional encodings of x according to flow_init, instead of the grid of x.\n # However, an accurate computation of the relative distances between all input units is expensive.\n # Since values in flow_init are usually small, this inaccuracy may be negligible.\n height_emb = self.rel_height(self.rel_ind[:h, :h].reshape(-1))\n width_emb = self.rel_width( self.rel_ind[:w, :w].reshape(-1))\n\n # height_emb: [46*46, 128] => [46, 46, 1, 128]\n # width_emb: [62*62, 128] => [62, 1, 62, 128]\n # height_emb[i, j]: the embedding of element at (i,j) as a function of the height difference (i-j).\n # width_emb[i, j]: the embedding of element at (i,j) as a function of the width difference (i-j).\n height_emb = rearrange(height_emb, '(x u) d -> x u () d', x=h)\n width_emb = rearrange(width_emb, '(y v) d -> y () v d', y=w)\n \n # outer product? y, uv -> y u v b h x y d x u v d\n # height_score: [8, 1, 46, 62, 46, 1] <= [8, 1, 46, 62, 128] * [46, 46, 1, 128]\n # width_score: [8, 1, 46, 62, 1, 62]\n height_score = einsum('b h x y d, x u v d -> b h x y u v', q, height_emb)\n width_score = einsum('b h x y d, y u v d -> b h x y u v', q, width_emb)\n # height_score + width_score: [8, 1, 46, 62, 46, 62], 65071232 elements.\n return height_score + width_score\n\n\nclass Attention(nn.Module):\n def __init__(\n self,\n *,\n args,\n dim,\n max_pos_size = 100,\n heads = 4,\n dim_head = 128,\n ):\n super().__init__()\n self.args = args\n self.heads = heads\n self.scale = dim_head ** -0.5\n inner_dim = heads * dim_head\n\n self.to_qk = nn.Conv2d(dim, inner_dim * 2, 1, bias=False)\n\n self.pos_emb = RelPosEmb(max_pos_size, dim_head)\n self.pos_embed_weight = 1.0\n \n def forward(self, fmap):\n heads, b, c, h, w = self.heads, *fmap.shape\n\n # q, k: [8, 128, 46, 62]\n q, k = self.to_qk(fmap).chunk(2, dim=1)\n\n # q, k: [8, 1, 46, 62, 128]\n q, k = map(lambda t: rearrange(t, 'b (h d) x y -> b h x y d', h=heads), (q, k))\n # Why not scale k?\n q = self.scale * q\n \n if self.args.position_only:\n sim = self.pos_emb(q)\n\n elif self.args.position_and_content:\n # [..., 46, 62, ...] . [..., 46, 62, ...] 
=> [..., 46, 62, 46, 62]\n sim_content = einsum('b h x y d, b h u v d -> b h x y u v', q, k)\n sim_pos = self.pos_emb(q)\n sim = sim_content + self.pos_embed_weight * sim_pos\n \n else:\n # q, k: [B, 1, 46, 62, 128]\n # sim: [B, 1, 46, 62, 46, 62]\n sim = einsum('b h x y d, b h u v d -> b h x y u v', q, k)\n\n sim = rearrange(sim, 'b h x y u v -> b h (x y) (u v)')\n attn = sim.softmax(dim=-1)\n\n return attn\n\n# Aggregate output is dim-dimensional, same as the input. No FFN is used.\nclass Aggregate(nn.Module):\n def __init__(\n self,\n args,\n dim,\n heads = 4,\n dim_head = 128,\n ):\n super().__init__()\n self.args = args\n self.heads = heads\n self.scale = dim_head ** -0.5\n inner_dim = heads * dim_head\n\n self.to_v = nn.Conv2d(dim, inner_dim, 1, bias=False)\n\n self.gamma = nn.Parameter(torch.zeros(1))\n\n if dim != inner_dim:\n self.project = nn.Conv2d(inner_dim, dim, 1, bias=False)\n else:\n self.project = None\n\n def forward(self, attn, fmap):\n heads, b, c, h, w = self.heads, *fmap.shape\n\n v = self.to_v(fmap)\n v = rearrange(v, 'b (h d) x y -> b h (x y) d', h=heads)\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h (x y) d -> b (h d) x y', x=h, y=w)\n\n # project is None for GMA. \n if self.project is not None:\n out = self.project(out)\n\n out = fmap + self.gamma * out\n\n return out\n\n\nif __name__ == \"__main__\":\n att = Attention(dim=128, heads=1)\n fmap = torch.randn(2, 128, 40, 90)\n out = att(fmap)\n\n print(out.shape)\n" ]
[ [ "torch.zeros", "torch.randn", "torch.einsum", "torch.nn.Conv2d", "torch.nn.Embedding", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ThomasGmeinder/incubator-mxnet
[ "0f3c5da37bf1647e18fce26beb9f06f5d6183846" ]
[ "python/mxnet/ndarray/numpy/_op.py" ]
[ "# pylint: disable=C0302\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=unused-argument\n\"\"\"Namespace for numpy operators used in Gluon dispatched by F=ndarray.\"\"\"\n\nimport numpy as _np\nfrom ...base import numeric_types, integer_types\nfrom ...util import _sanity_check_params, set_module\nfrom ...util import wrap_np_unary_func, wrap_np_binary_func\nfrom ...context import current_context\nfrom . import _internal as _npi\nfrom ..ndarray import NDArray\n\n__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',\n 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',\n 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert',\n 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',\n 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',\n 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',\n 'tensordot', 'eye', 'linspace',\n 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',\n 'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',\n 'average', 'mean', 'maximum', 'minimum',\n 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',\n 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round',\n 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',\n 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',\n 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',\n 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',\n 'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',\n 'where', 'bincount']\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shape(a):\n \"\"\"\n Return the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n shape : tuple of ints\n The elements of the shape tuple give the lengths of the\n corresponding array dimensions.\n\n See Also\n --------\n ndarray.shape : Equivalent array method.\n\n Examples\n --------\n >>> np.shape(np.eye(3))\n (3, 3)\n >>> np.shape([[1, 2]])\n (1, 2)\n >>> np.shape([0])\n (1,)\n >>> np.shape(0)\n ()\n \"\"\"\n return a.shape\n\n\n@set_module('mxnet.ndarray.numpy')\ndef zeros(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with zeros.\n This function 
currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `numpy.float32`. Note that this\n behavior is different from NumPy's `zeros` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of zeros with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with ones.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `numpy.float32`. Note that this\n behavior is different from NumPy's `ones` function where `float64`\n is the default value, because `float32` is considered as the default\n data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and ctx.\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef zeros_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.zeros_like(x)\n array([[0., 0., 0.],\n [0., 0., 0.]])\n >>> np.zeros_like(x, int)\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.zeros_like(y)\n array([0., 0., 0.], dtype=float64)\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ones_like(a, dtype=None, order='C', ctx=None, out=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n ones : Return a new array setting values to one.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.ones_like(x)\n array([[1., 1., 1.],\n [1., 1., 1.]])\n >>> np.ones_like(x, int)\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.ones_like(y)\n array([1., 1., 1.], dtype=float64)\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef broadcast_to(array, shape):\n \"\"\"\n Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : ndarray or scalar\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. 
Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n MXNetError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n \"\"\"\n if _np.isscalar(array):\n return full(shape, array)\n return _npi.broadcast_to(array, shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar or ndarray\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array. The default, `None`, means\n `np.array(fill_value).dtype`.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n If `fill_value` is an ndarray, out will have the same context as `fill_value`\n regardless of the provided `ctx`.\n\n Notes\n -----\n This function differs from the original `numpy.full\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in\n the following way(s):\n - Have an additional `ctx` argument to specify the device\n - Have an additional `out` argument\n - Currently does not support `order` selection\n\n See Also\n --------\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), 10)\n array([[10., 10.],\n [10., 10.]])\n >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))\n array([[2, 2],\n [2, 2]], dtype=int32)\n\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n if isinstance(fill_value, NDArray):\n if dtype is None:\n ret = broadcast_to(fill_value, shape)\n else:\n ret = broadcast_to(fill_value, shape).astype(dtype)\n return ret\n dtype = _np.float32 if dtype is None else dtype\n return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n ctx: to specify the device, e.g. 
the i-th GPU.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6, dtype=int)\n >>> np.full_like(x, 1)\n array([1, 1, 1, 1, 1, 1], dtype=int64)\n >>> np.full_like(x, 0.1)\n array([0, 0, 0, 0, 0, 0], dtype=int64)\n >>> np.full_like(x, 0.1, dtype=np.float64)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)\n >>> np.full_like(x, np.nan, dtype=np.double)\n array([nan, nan, nan, nan, nan, nan], dtype=float64)\n >>> y = np.arange(6, dtype=np.float32)\n >>> np.full_like(y, 0.1)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n \"\"\"\n if order != 'C':\n raise NotImplementedError\n if ctx is None:\n ctx = current_context()\n return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : ndarray\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n subok : {False}, optional\n If True, then the newly created array will use the sub-class\n type of 'a', otherwise it will be a base-class array. Defaults\n to False.\n (Only support False at this moment)\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n (Not supported at this moment)\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
It may be marginally faster than\n the functions that do set the array values.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.empty_like(a)\n array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized\n [ 4567052944, -5764607523034234880, 844424930131968]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized\n [2.0e-323, 2.5e-323, 3.0e-323]])\n \"\"\"\n dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',\n _np.int64:'int64', _np.float16:'float16', _np.float32:'float32',\n _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}\n if order != 'C':\n raise NotImplementedError(\"Only support C-order at this moment\")\n if subok:\n raise NotImplementedError(\"Creating array by using sub-class is not supported at this moment\")\n if shape is not None:\n raise NotImplementedError(\"Assigning new shape is not supported at this moment\")\n try:\n dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]\n except:\n raise NotImplementedError(\"Do not support this dtype at this moment\")\n return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef arange(start, stop=None, step=1, dtype=None, ctx=None):\n \"\"\"Return evenly spaced values within a given interval.\n\n Values are generated within the half-open interval ``[start, stop)``\n (in other words, the interval including `start` but excluding `stop`).\n For integer arguments the function is equivalent to the Python built-in\n `range` function, but returns an ndarray rather than a list.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default\n start value is 0.\n stop : number\n End of interval. The interval does not include this value, except\n in some cases where `step` is not an integer and floating point\n round-off affects the length of `out`.\n step : number, optional\n Spacing between values. For any output `out`, this is the distance\n between two adjacent values, ``out[i+1] - out[i]``. The default\n step size is 1. If `step` is specified as a position argument,\n `start` must also be given.\n dtype : dtype\n The type of the output array. The default is `float32`.\n\n Returns\n -------\n arange : ndarray\n Array of evenly spaced values.\n\n For floating point arguments, the length of the result is\n ``ceil((stop - start)/step)``. Because of floating point overflow,\n this rule may result in the last element of `out` being greater\n than `stop`.\n \"\"\"\n if dtype is None:\n dtype = 'float32'\n if ctx is None:\n ctx = current_context()\n if stop is None:\n stop = start\n start = 0\n if step is None:\n step = 1\n if start is None and stop is None:\n raise ValueError('start and stop cannot be both None')\n if step == 0:\n raise ZeroDivisionError('step cannot be 0')\n return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef identity(n, dtype=None, ctx=None):\n \"\"\"\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output. 
Defaults to ``numpy.float32``.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> np.identity(3)\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n if not isinstance(n, int):\n raise TypeError(\"Input 'n' should be an integer\")\n if n < 0:\n raise ValueError(\"Input 'n' cannot be negative\")\n if ctx is None:\n ctx = current_context()\n dtype = _np.float32 if dtype is None else dtype\n return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef take(a, indices, axis=None, mode='raise', out=None):\n r\"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as \"fancy\"\n indexing (indexing arrays using arrays); however, it can be easier to use\n if you need elements along a given axis. A call such as\n ``np.take(arr, indices, axis=3)`` is equivalent to\n ``arr[:,:,:,indices,...]``.\n\n Explained without fancy indexing, this is equivalent to the following use\n of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of\n indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n ----------\n a : ndarray\n The source array.\n indices : ndarray\n The indices of the values to extract. Also allow scalars for indices.\n axis : int, optional\n The axis over which to select values. By default, the flattened\n input array is used.\n out : ndarray, optional\n If provided, the result will be placed in this array. It should\n be of the appropriate shape and dtype.\n mode : {'clip', 'wrap'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'clip' -- clip to the range (default)\n * 'wrap' -- wrap around\n\n 'clip' mode means that all indices that are too large are replaced\n by the index that addresses the last element along that axis. 
Note\n that this disables indexing with negative numbers.\n\n Returns\n -------\n out : ndarray\n The returned array has the same type as `a`.\n\n Notes\n -----\n\n This function differs from the original `numpy.take\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in\n the following way(s):\n\n - Only ndarray or scalar ndarray is accepted as valid input.\n\n Examples\n --------\n >>> a = np.array([4, 3, 5, 7, 6, 8])\n >>> indices = np.array([0, 1, 4])\n >>> np.take(a, indices)\n array([4., 3., 6.])\n\n In this example for `a` is an ndarray, \"fancy\" indexing can be used.\n\n >>> a[indices]\n array([4., 3., 6.])\n\n If `indices` is not one dimensional, the output also has these dimensions.\n\n >>> np.take(a, np.array([[0, 1], [2, 3]]))\n array([[4., 3.],\n [5., 7.]])\n \"\"\"\n if mode not in ('wrap', 'clip', 'raise'):\n raise NotImplementedError(\n \"function take does not support mode '{}'\".format(mode))\n if axis is None:\n return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)\n else:\n return _npi.take(a, indices, axis, mode, out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef insert(arr, obj, values, axis=None):\n \"\"\"\n Insert values along the given axis before the given indices.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : int, slice or ndarray of int64\n Object that defines the index or indices before which `values` is\n inserted.\n Support for multiple insertions when `obj` is a single scalar or a\n sequence with one element (only support int32 and int64 element).\n values : ndarray\n Values to insert into `arr`.\n If the type of values is different from that of arr, values is converted\n to the type of arr.\n axis : int, optional\n Axis along which to insert `values`. If `axis` is None then `arr`\n is flattened first.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with `values` inserted. Note that `insert`\n does not occur in-place: a new array is returned. 
If\n `axis` is None, `out` is a flattened array.\n\n Notes\n -----\n - Note that for higher dimensional inserts `obj=0` behaves very different\n from `obj=[0]` just like `arr[:,0,:] = values` is different from\n `arr[:,[0],:] = values`.\n - If obj is a ndarray, it's dtype only supports int64\n\n Examples\n --------\n >>> a = np.array([[1, 1], [2, 2], [3, 3]])\n >>> a\n array([[1., 1.],\n [2., 2.],\n [3., 3.]])\n >>> np.insert(a, 1, np.array(5))\n array([1., 5., 1., 2., 2., 3., 3.])\n >>> np.insert(a, 1, np.array(5), axis=1)\n array([[1., 5., 1.],\n [2., 5., 2.],\n [3., 5., 3.]])\n\n Difference between sequence and scalars:\n\n >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n\n >>> b = a.flatten()\n >>> b\n array([1., 1., 2., 2., 3., 3.])\n >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))\n array([1., 1., 5., 6., 2., 2., 3., 3.])\n\n >>> np.insert(b, slice(2, 4), np.array([5, 6]))\n array([1., 1., 5., 2., 6., 2., 3., 3.])\n\n # type casting\n >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))\n array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)\n\n >>> x = np.arange(8).reshape(2, 4)\n >>> idx = np.array([1, 3], dtype=np.int64)\n >>> np.insert(x, idx, np.array([999]), axis=1)\n array([[ 0., 999., 1., 2., 999., 3.],\n [ 4., 999., 5., 6., 999., 7.]])\n \"\"\"\n if isinstance(values, numeric_types):\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.insert_tensor(arr, obj, val=values, axis=axis)\n\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if not isinstance(values, NDArray):\n raise TypeError(\"'values' can not support type {}\".format(str(type(values))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.insert_tensor(arr, values, obj, axis=axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n#pylint: disable= too-many-arguments, no-member, protected-access\ndef _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):\n \"\"\" Helper function for element-wise operation.\n The function will perform numpy-like broadcasting if needed and call different functions.\n\n Parameters\n --------\n lhs : ndarray or numeric value\n Left-hand side operand.\n\n rhs : ndarray or numeric value\n Right-hand operand,\n\n fn_array : function\n Function to be called if both lhs and rhs are of ``ndarray`` type.\n\n fn_scalar : function\n Function to be called if both lhs and rhs are numeric values.\n\n lfn_scalar : function\n Function to be called if lhs is ``ndarray`` while rhs is numeric value\n\n rfn_scalar : function\n Function to be called if lhs is numeric value while rhs is ``ndarray``;\n if none is provided, then the function is 
commutative, so rfn_scalar is equal to lfn_scalar\n\n Returns\n --------\n mxnet.numpy.ndarray or scalar\n result array or scalar\n \"\"\"\n from ...numpy import ndarray\n if isinstance(lhs, numeric_types):\n if isinstance(rhs, numeric_types):\n return fn_scalar(lhs, rhs, out=out)\n else:\n if rfn_scalar is None:\n # commutative function\n return lfn_scalar(rhs, float(lhs), out=out)\n else:\n return rfn_scalar(rhs, float(lhs), out=out)\n elif isinstance(rhs, numeric_types):\n return lfn_scalar(lhs, float(rhs), out=out)\n elif isinstance(rhs, ndarray):\n return fn_array(lhs, rhs, out=out)\n else:\n raise TypeError('type {} not supported'.format(str(type(rhs))))\n#pylint: enable= too-many-arguments, no-member, protected-access\n\n\n@set_module('mxnet.ndarray.numpy')\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : ndarray\n Input array. Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. The default is None.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n Notes\n -----\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. 
The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n This function differs from the original `numpy.unique\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in\n the following aspects:\n\n - Only support ndarray as input.\n - Object arrays or structured arrays are not supported.\n\n Examples\n --------\n >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))\n array([1., 2., 3.])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1., 2., 3.])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1., 0., 0.],\n [2., 3., 4.]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 5, 3, 2], dtype=int64)\n >>> a[indices]\n array([1., 2., 3., 4., 6.])\n\n Reconstruct the input array from the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1], dtype=int64)\n >>> u[indices]\n array([1., 2., 6., 4., 2., 3., 2.])\n \"\"\"\n ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis)\n if isinstance(ret, list):\n return tuple(ret)\n else:\n return ret\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef add(x1, x2, out=None, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n add : ndarray or scalar\n The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef subtract(x1, x2, out=None, **kwargs):\n \"\"\"\n Subtract arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be subtracted from each other. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape\n of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n subtract : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,\n _npi.rsubtract_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef multiply(x1, x2, out=None, **kwargs):\n \"\"\"\n Multiply arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2\n are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef divide(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns a true division of the inputs, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,\n _npi.rtrue_divide_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef true_divide(x1, x2, out=None):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n Instead of the Python traditional 'floor division', this returns a true\n division. True division adjusts the output type to present the best\n answer, regardless of input types.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Notes\n -----\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 type.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,\n _npi.rtrue_divide_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef mod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef delete(arr, obj, axis=None):\n \"\"\"\n Return a new array with sub-arrays along an axis deleted. For a one\n dimensional array, this returns those entries not returned by\n `arr[obj]`.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : slice, int or ndarray of ints\n Indicate indices of sub-arrays to remove along the specified axis.\n axis : int, optional\n The axis along which to delete the subarray defined by `obj`.\n If `axis` is None, `obj` is applied to the flattened array.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with the elements specified by `obj` removed. Note\n that `delete` does not occur in-place. 
If `axis` is None, `out` is\n a flattened array.\n\n Examples\n --------\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n >>> arr\n array([[ 1., 2., 3., 4.],\n [ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, 1, 0)\n array([[ 1., 2., 3., 4.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, slice(None, None, 2), 1)\n array([[ 2., 4.],\n [ 6., 8.],\n [10., 12.]])\n\n >>> np.delete(arr, np.array([1,3,5]), None)\n array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])\n >>> np.delete(arr, np.array([1,1,5]), None)\n array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])\n \"\"\"\n if not isinstance(arr, NDArray):\n raise TypeError(\"'arr' can not support type {}\".format(str(type(arr))))\n if isinstance(obj, slice):\n start = obj.start\n stop = obj.stop\n step = 1 if obj.step is None else obj.step\n return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)\n elif isinstance(obj, integer_types):\n return _npi.delete(arr, int_ind=obj, axis=axis)\n elif isinstance(obj, NDArray):\n return _npi.delete(arr, obj, axis=axis)\n else:\n raise TypeError(\"'obj' can not support type {}\".format(str(type(obj))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef matmul(a, b, out=None):\n \"\"\"\n Matrix product of two arrays.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays, scalars not allowed.\n out : ndarray, optional\n A location into which the result is stored.\n If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The matrix product of the inputs.\n This is a scalar only when both x1, x2 are 1-d vectors.\n\n Raises\n ------\n MXNetError\n If the last dimension of a is not the same size as the second-to-last dimension of b.\n If a scalar value is passed in.\n\n See Also\n --------\n tensordot :\n Sum products over arbitrary axes.\n dot :\n alternative matrix product with different broadcasting rules.\n einsum :\n Einstein summation convention.\n\n Notes\n -----\n The behavior depends on the arguments in the following way.\n\n - If both arguments are 2-D they are multiplied like conventional matrices.\n - If either argument is N-D, N > 2, it is treated as a stack of matrices\n residing in the last two indexes and broadcast accordingly.\n - If the first argument is 1-D, it is promoted to a matrix by prepending\n a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n - If the second argument is 1-D, it is promoted to a matrix by appending a 1\n to its dimensions. After matrix multiplication the appended 1 is removed.\n\n matmul differs from dot in two important ways:\n\n - Multiplication by scalars is not allowed, use multiply instead.\n - Stacks of matrices are broadcast together as if the matrices were elements,\n respecting the signature (n,k),(k,m)->(n,m):\n >>> a = np.ones([9, 5, 7, 4])\n >>> c = np.ones([9, 5, 4, 3])\n >>> np.dot(a, c).shape\n (9, 5, 7, 9, 5, 3)\n >>> np.matmul(a, c).shape\n (9, 5, 7, 3)\n >>> # n is 7, k is 4, m is 3\n\n Examples\n --------\n For 2-D arrays it is the matrix product:\n >>> a = np.array([[1, 0],\n ... [0, 1]])\n >>> b = np.array([[4, 1],\n ... [2, 2]])\n >>> np.matmul(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n For 2-D mixed with 1-D, the result is the usual.\n >>> a = np.array([[1, 0],\n ... 
[0, 1]])\n >>> b = np.array([1, 2])\n >>> np.matmul(a, b)\n array([1., 2.])\n >>> np.matmul(b, a)\n array([1., 2.])\n\n Broadcasting is conventional for stacks of arrays\n >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))\n >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))\n >>> np.matmul(a, b).shape\n (2, 2, 2)\n >>> np.matmul(a, b)[0, 1, 1]\n array(98.)\n >>> sum(a[0, 1, :] * b[0, :, 1])\n array(98.)\n\n Scalar multiplication raises an error.\n >>> np.matmul([1, 2], 3)\n Traceback (most recent call last):\n ...\n mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.\n \"\"\"\n return _npi.matmul(a, b, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef remainder(x1, x2, out=None):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef power(x1, x2, out=None, **kwargs):\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argsort(a, axis=-1, kind=None, order=None):\n \"\"\"\n Returns the indices that would sort an array.\n Perform an indirect sort along the given axis using the algorithm specified\n by the `kind` keyword. It returns an array of indices of the same shape as\n `a` that index data along the given axis in sorted order.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). 
If None,\n the flattened array is used.\n kind : string, optional\n This argument can take any string, but it does not have any effect on the\n final result.\n order : str or list of str, optional\n Not supported yet, will raise NotImplementedError if not None.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n \"\"\"\n if order is not None:\n raise NotImplementedError(\"order not supported here\")\n\n return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef sort(a, axis=-1, kind=None, order=None):\n \"\"\"\n Return a sorted copy of an array.\n\n Parameters\n ----------\n a : ndarray\n Array to be sorted.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n kind : string, optional\n This argument can take any string, but it does not have any effect on the\n final result.\n order : str or list of str, optional\n Not supported yet, will raise NotImplementedError if not None.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n \"\"\"\n if order is not None:\n raise NotImplementedError(\"order not supported here\")\n return _npi.sort(data=a, axis=axis, is_ascend=True)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tensordot(a, b, axes=2):\n r\"\"\"\n tensordot(a, b, axes=2)\n Compute tensor dot product along specified axes for arrays >= 1-D.\n Given two tensors (arrays of dimension greater than or equal to one),\n `a` and `b`, and an ndarray object containing two ndarray\n objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s\n elements (components) over the axes specified by ``a_axes`` and\n ``b_axes``. 
The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N``\n dimensions of `a` and the first ``N`` dimensions of `b` are summed\n over.\n Parameters\n ----------\n a, b : ndarray, len(shape) >= 1\n Tensors to \"dot\".\n axes : int or (2,) ndarray\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) ndarray\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. Both elements ndarray must be of the same length.\n See Also\n --------\n dot, einsum\n Notes\n -----\n Three common use cases are:\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n When `axes` is integer_like, the sequence for evaluation will be: first\n the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and\n Nth axis in `b` last.\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n Examples\n --------\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[ 4400., 4730.],\n [ 4532., 4874.],\n [ 4664., 5018.],\n [ 4796., 5162.],\n [ 4928., 5306.]])\n \"\"\"\n if _np.isscalar(axes):\n return _npi.tensordot_int_axes(a, b, axes)\n\n if len(axes) != 2:\n raise ValueError('Axes must consist of two arrays.')\n a_axes_summed, b_axes_summed = axes\n if _np.isscalar(a_axes_summed):\n a_axes_summed = (a_axes_summed,)\n if _np.isscalar(b_axes_summed):\n b_axes_summed = (b_axes_summed,)\n\n if len(a_axes_summed) != len(b_axes_summed):\n raise ValueError('Axes length mismatch')\n\n return _npi.tensordot(a, b, a_axes_summed, b_axes_summed)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : ndarray\n Input data. The histogram is computed over the flattened array.\n bins : int or NDArray\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n .. versionadded:: 1.11.0\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n range : (float, float)\n The lower and upper range of the bins. Required when `bins` is an integer.\n Values outside the range are ignored. 
The first element of the range must\n be less than or equal to the second.\n normed : bool, optional\n Not supported yet, coming soon.\n weights : array_like, optional\n Not supported yet, coming soon.\n density : bool, optional\n Not supported yet, coming soon.\n \"\"\"\n if normed is True:\n raise NotImplementedError(\"normed is not supported yet...\")\n if weights is not None:\n raise NotImplementedError(\"weights is not supported yet...\")\n if density is True:\n raise NotImplementedError(\"density is not supported yet...\")\n if isinstance(bins, numeric_types):\n if range is None:\n raise NotImplementedError(\"automatic range is not supported yet...\")\n return _npi.histogram(a, bin_cnt=bins, range=range)\n if isinstance(bins, (list, tuple)):\n raise NotImplementedError(\"array_like bins is not supported yet...\")\n if isinstance(bins, str):\n raise NotImplementedError(\"string bins is not supported yet...\")\n if isinstance(bins, NDArray):\n return _npi.histogram(a, bins=bins)\n raise ValueError(\"np.histogram fails with\", locals())\n\n\n@set_module('mxnet.ndarray.numpy')\ndef eye(N, M=None, k=0, dtype=_np.float32, **kwargs):\n \"\"\"\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to N.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero,\n except for the k-th diagonal, whose values are equal to one.\n \"\"\"\n _sanity_check_params('eye', ['order'], kwargs)\n ctx = kwargs.pop('ctx', current_context())\n if ctx is None:\n ctx = current_context()\n return _npi.eye(N, M, k, ctx, dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Return evenly spaced numbers over a specified interval.\n Returns num evenly spaced samples, calculated over the interval [start, stop].\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : real number\n The starting value of the sequence.\n stop : real number\n The end value of the sequence, unless endpoint is set to False. In\n that case, the sequence consists of all but the last of num + 1\n evenly spaced samples, so that stop is excluded. Note that the step\n size changes when endpoint is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, stop is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between samples.\n dtype : dtype, optional\n The type of the output array. If dtype is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start or\n stop are array-like. By default (0), the samples will be along a new\n axis inserted at the beginning. 
Use -1 to get an axis at the end.\n\n Returns\n -------\n samples : ndarray\n There are num equally spaced samples in the closed interval\n `[start, stop]` or the half-open interval `[start, stop)`\n (depending on whether endpoint is True or False).\n step : float, optional\n Only returned if retstep is True\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n Notes\n -----\n\n This function differs from the original `numpy.linspace\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in\n the following aspects:\n\n - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray\n - axis could only be 0\n - There could be an additional `ctx` argument to specify the device, e.g. the i-th\n GPU.\n \"\"\"\n if isinstance(start, (list, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = current_context()\n if retstep:\n step = (stop - start) / (num - 1)\n return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step\n else:\n return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments\n r\"\"\"Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Non-scalar `start` and `stop` are now supported.\n\n Parameters\n ----------\n start : int or float\n ``base ** start`` is the starting value of the sequence.\n stop : int or float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. 
By default (0), the samples will be along a\n new axis inserted at the beginning. Currently, only axis = 0 is supported.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code below. Currently, only axis = 0 is supported.\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ...\n >>> power(base, y).astype(dtype)\n ...\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.44347, 464.15887, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.82794, 316.22775, 562.3413 ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396843, 6.349604 , 8. ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)\n array([4, 5, 6, 8], dtype=int32)\n >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))\n array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))\n \"\"\"\n if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \\\n isinstance(stop, (list, tuple, _np.ndarray, NDArray)):\n raise NotImplementedError('start and stop only support int and float')\n if axis != 0:\n raise NotImplementedError(\"the function only support axis 0\")\n if ctx is None:\n ctx = current_context()\n return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef expand_dims(a, axis):\n \"\"\"Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded array shape.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n \"\"\"\n return _npi.expand_dims(a, axis)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef lcm(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the lowest common multiple of ``|x1|`` and ``|x2|``.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing lowest common multiple. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
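# Added sketch (not from the original source): `expand_dims` above has no Examples
# section, so here is a minimal illustration of inserting a length-1 axis,
# assuming `from mxnet import np` as elsewhere in these doctests.
from mxnet import np

x = np.array([1., 2., 3.])        # shape (3,)
col = np.expand_dims(x, axis=1)   # shape (3, 1)
row = np.expand_dims(x, axis=0)   # shape (1, 3)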
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The lowest common multiple of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The greatest common divisor\n\n Examples\n --------\n >>> np.lcm(12, 20)\n 60\n >>> np.lcm(np.arange(6, dtype=int), 20)\n array([ 0, 20, 20, 60, 20, 20], dtype=int64)\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tril(m, k=0):\n r\"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : ndarray, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n >>> np.tril(a, -1)\n array([[ 0., 0., 0.],\n [ 4., 0., 0.],\n [ 7., 8., 0.],\n [10., 11., 12.]])\n \"\"\"\n return _npi.tril(m, k)\n\n\ndef _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):\n \"\"\"Helper function for unary operators.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input of the unary operator.\n fn_array : function\n Function to be called if x is of ``ndarray`` type.\n fn_scalar : function\n Function to be called if x is a Python scalar.\n out : ndarray\n The buffer ndarray for storing the result of the unary function.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n Result array or scalar.\n \"\"\"\n if isinstance(x, numeric_types):\n return fn_scalar(x, **kwargs)\n elif isinstance(x, NDArray):\n return fn_array(x, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sin(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The sine of each element of x. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sin(np.pi/2.)\n 1.0\n >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)\n array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])\n \"\"\"\n return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cos(x, out=None, **kwargs):\n r\"\"\"\n Cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. 
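# Added illustration (not from the original source): `_unary_func_helper` above
# dispatches on the input type - a Python scalar falls through to the official NumPy
# function, while an MXNet ndarray is routed to the corresponding `_npi` operator.
from mxnet import np

np.sin(1.5707963)                   # scalar in -> handled by _np.sin
np.sin(np.array([0.0, 1.5707963]))  # ndarray in -> handled by the MXNet backend op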
The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding cosine values. This is a scalar if x is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cos(np.array([0, np.pi/2, np.pi]))\n array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.cos(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sinh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic sine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sinh(0)\n 0.0\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.sinh(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cosh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic cosine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cosh(0)\n 1.0\n \"\"\"\n return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tanh(x, out=None, **kwargs):\n \"\"\"\n Compute hyperbolic tangent element-wise.\n Equivalent to ``np.sinh(x)/np.cosh(x)``.\n\n Parameters\n ----------\n x : ndarray or scalar.\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic tangent values.\n\n Notes\n -----\n If `out` is provided, the function writes the result into it,\n and returns a reference to `out`. 
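# Added sketch (not from the original source): as the cos/sinh doctests above show,
# the unary wrappers accept an optional `out` ndarray of matching shape and dtype;
# the result is written into that buffer and the same buffer is returned.
from mxnet import np

buf = np.zeros((3,))
res = np.sinh(np.array([0.0, 0.1, 0.2]), buf)  # res is buf -> True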
(See Examples)\n - input x does not support complex computation (like imaginary number)\n >>> np.tanh(np.pi*1j)\n TypeError: type <type 'complex'> not supported\n\n Examples\n --------\n >>> np.tanh(np.array([0, np.pi]))\n array([0. , 0.9962721])\n >>> np.tanh(np.pi)\n 0.99627207622075\n >>> # Example of providing the optional output parameter illustrating\n >>> # that what is returned is a reference to said parameter\n >>> out1 = np.array(1)\n >>> out2 = np.tanh(np.array(0.1), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log10(x, out=None, **kwargs):\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The logarithm to the base 10 of `x`, element-wise. NaNs are\n returned where x is negative. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.log10(np.array([1e-15, -3.]))\n array([-15., nan])\n \"\"\"\n return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sqrt(x, out=None, **kwargs):\n \"\"\"\n Return the non-negative square-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose square-roots are required.\n out : ndarray, or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n An array of the same shape as `x`, containing the positive\n square-root of each element in `x`. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sqrt(np.array([1,4,9]))\n array([1., 2., 3.])\n >>> np.sqrt(np.array([4, -1, np.inf]))\n array([ 2., nan, inf])\n \"\"\"\n return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef cbrt(x, out=None, **kwargs):\n r\"\"\"\n Return the cube-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray\n The values whose cube-roots are required.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n y : ndarray\n An array of the same shape as x, containing the cube-root of each element in x.\n If out was provided, y is a reference to it. 
This is a scalar if x is a scalar.\n\n Examples\n ----------\n >>> np.cbrt([1,8,27])\n array([ 1., 2., 3.])\n \"\"\"\n return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef abs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> x = np.array([-1.2, 1.2])\n >>> np.abs(x)\n array([1.2, 1.2])\n \"\"\"\n return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef absolute(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n np.abs is a shorthand for this function.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n\n Examples\n ----------\n >>> x = np.array([-1.2, 1.2])\n >>> np.absolute(x)\n array([ 1.2, 1.2])\n \"\"\"\n return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef sign(x, out=None, **kwargs):\n r\"\"\"\n Returns an element-wise indication of the sign of a number.\n The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.\n\n Parameters\n ----------\n x : ndarray or a scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The sign of `x`.\n This is a scalar if `x` is a scalar.\n\n Note\n -------\n - Only supports real number as input elements.\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([-5., 4.5])\n >>> np.sign(a)\n array([-1., 1.])\n >>> # Use scalars as inputs:\n >>> np.sign(4.0)\n 1.0\n >>> np.sign(0)\n 0\n >>> # Use ``out`` parameter:\n >>> b = np.zeros((2, ))\n >>> np.sign(a, out=b)\n array([-1., 1.])\n >>> b\n array([-1., 1.])\n \"\"\"\n return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef exp(x, out=None, **kwargs):\n r\"\"\"\n Calculate the exponential of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential of `x`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.exp(1)\n 2.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])\n \"\"\"\n return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef expm1(x, out=None, **kwargs):\n r\"\"\"\n Calculate `exp(x) - 1` of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential minus one: `out = exp(x) - 1`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.expm1(1)\n 1.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.expm1(x)\n array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])\n \"\"\"\n return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsin(x, out=None, **kwargs):\n r\"\"\"\n Inverse sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.arcsin(1) # pi/2\n 1.5707963267948966\n >>> np.arcsin(-1) # -pi/2\n -1.5707963267948966\n >>> np.arcsin(0)\n 0.0\n\n Notes\n -----\n `arcsin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. 
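# Added sketch (not from the original source): `expm1` exists because computing
# exp(x) - 1 directly loses precision for very small x; expm1 evaluates the same
# quantity without the cancellation.
from mxnet import np

tiny = np.array([1e-10], dtype=np.float64)
naive = np.exp(tiny) - 1    # suffers from cancellation in the subtraction
safe = np.expm1(tiny)       # stays close to 1e-10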
The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *arcsin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n - Only support ndarray or scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccos(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = arccos(y).\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n See also\n ----------\n cos, arctan, arcsin\n\n Notes\n ----------\n arccos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, arccos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.arccos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctan(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. 
The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `arctan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.arctan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log(x, out=None, **kwargs):\n \"\"\"\n Natural logarithm, element-wise.\n The natural logarithm `log` is the inverse of the exponential function,\n so that `log(exp(x)) = x`. The natural logarithm is logarithm in base\n `e`.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The natural logarithm of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and\n ``nan`` according to the input.\n This function differs from the original `numpy.log\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)\n >>> np.log(a)\n array([ 0., 1., 2., -inf], dtype=float64)\n >>> # Using default float32 dtype may lead to slightly different behavior:\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)\n >>> np.log(a)\n array([ 0., 0.99999994, 2., -inf])\n >>> np.log(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef degrees(x, out=None, **kwargs):\n \"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding degree values; if `out` was supplied this is a\n reference to it.\n This is a scalar if `x` is a scalar.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.\n - ``out`` param: cannot perform auto broadcasting. 
``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> rad = np.arange(12.) * np.pi / 6\n >>> np.degrees(rad)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> # Use specified ``out`` ndarray:\n >>> out = np.zeros((rad.shape))\n >>> np.degrees(rad, out)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> out\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n \"\"\"\n return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rad2deg(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"rad2deg(x)\" is \"x *180 / pi\".\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.rad2deg(np.pi/2)\n 90.0\n \"\"\"\n return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef rint(x, out=None, **kwargs):\n \"\"\"\n Round elements of the array to the nearest integer.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array is same shape and type as x. 
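# Added sketch (not from the original source): `degrees` and `rad2deg` above are the
# same conversion (x * 180 / pi), and `radians`/`deg2rad` below invert it, so a
# round trip returns the original values.
from mxnet import np

deg = np.array([0., 90., 180.])
rad = np.radians(deg)     # [0, pi/2, pi]
np.degrees(rad)           # back to [0, 90, 180]
np.rad2deg(rad)           # same result as degrees(rad)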
This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.rint\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.rint(a)\n array([-2., -2., -0., 0., 1., 2., 2.])\n \"\"\"\n return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log2(x, out=None, **kwargs):\n \"\"\"\n Base-2 logarithm of x.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The logarithm base two of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.log2\n <https://www.google.com/search?q=numpy+log2>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> x = np.array([0, 1, 2, 2**4])\n >>> np.log2(x)\n array([-inf, 0., 1., 4.])\n \"\"\"\n return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef log1p(x, out=None, **kwargs):\n \"\"\"\n Return the natural logarithm of one plus the input array, element-wise.\n Calculates ``log(1 + x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n Natural logarithm of 1 + x, element-wise. This is a scalar\n if x is a scalar.\n\n Notes\n -----\n For real-valued input, `log1p` is accurate also for `x` so small\n that `1 + x == 1` in floating-point accuracy.\n Logarithm is a multivalued function: for each `x` there is an infinite\n number of `z` such that `exp(z) = 1 + x`. 
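# Added sketch (not from the original source): like expm1, `log1p` is the numerically
# safe form of log(1 + x) for tiny x, as the note above explains; log2/log10 only
# change the base of the logarithm.
from mxnet import np

x = np.array([1e-12], dtype=np.float64)
np.log1p(x)       # ~1e-12, accurate
np.log(1 + x)     # loses accuracy to rounding inside 1 + x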
The convention is to return\n the `z` whose imaginary part lies in `[-pi, pi]`.\n For real-valued input data types, `log1p` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n cannot support complex-valued input.\n\n Examples\n --------\n >>> np.log1p(1e-99)\n 1e-99\n >>> a = np.array([3, 4, 5])\n >>> np.log1p(a)\n array([1.3862944, 1.609438 , 1.7917595])\n \"\"\"\n return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef radians(x, out=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array in degrees.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding radian values. This is a scalar if x is a scalar.\n\n Notes\n -----\n This function differs from the original `numpy.radians\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in\n the following way(s):\n - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n - broadcasting to `out` of different shape is currently not supported\n - when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> deg = np.arange(12.) * 30.\n >>> np.radians(deg)\n array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,\n 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],\n dtype=float32)\n \"\"\"\n return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef deg2rad(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n \"deg2rad(x)\" is \"x * pi / 180\".\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float32 and float64.\n - `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.deg2rad(180)\n 3.1415927\n \"\"\"\n return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef reciprocal(x, out=None, **kwargs):\n r\"\"\"\n Return the reciprocal of the argument, element-wise.\n Calculates ``1/x``.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose reciprocals are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.reciprocal(2.)\n 0.5\n >>> x = np.array([1, 2., 3.33])\n >>> np.reciprocal(x)\n array([1. , 0.5 , 0.3003003])\n\n Notes\n -----\n .. 
note::\n This function is not designed to work with integers.\n For integer arguments with absolute value larger than 1 the result is\n always zero because of the way Python handles integer division. For\n integer zero the result is an overflow.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.reciprocal\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n \"\"\"\n return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef square(x, out=None, **kwargs):\n r\"\"\"\n Return the element-wise square of the input.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose squares are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.square(2.)\n 4.0\n >>> x = np.array([1, 2., -1])\n >>> np.square(x)\n array([1., 4., 1.])\n\n Notes\n -----\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.square\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in\n the following aspects:\n - Only support ndarray and scalar now.\n - `where` argument is not supported.\n - Complex input is not supported.\n \"\"\"\n return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef negative(x, out=None, **kwargs):\n r\"\"\"\n Numerical negative, element-wise.\n\n Parameters:\n ------------\n x : ndarray or scalar\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored.\n\n Returns:\n ---------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.negative(1)\n -1\n \"\"\"\n return _unary_func_helper(x, _npi.negative, _np.negative, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef fix(x, out=None, **kwargs):\n r\"\"\"\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values are returned as floats.\n\n Parameters:\n ----------\n x : ndarray\n An array of floats to be rounded\n out : ndarray, optional\n Output array\n\n Returns:\n -------\n y : ndarray of floats\n\n Examples\n ---------\n >>> np.fix(3.14)\n 3\n \"\"\"\n return _unary_func_helper(x, _npi.fix, _np.fix, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef tan(x, out=None, **kwargs):\n r\"\"\"\n Compute tangent element-wise.\n Equivalent to np.sin(x)/np.cos(x) element-wise.\n\n Parameters:\n ----------\n x : ndarray\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided,\n it must have a shape that the inputs broadcast to. If not provided or None,\n a freshly-allocated array is returned. 
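# Added sketch (not from the original source): per the note above, `reciprocal` is
# intended for floating point data - integer inputs truncate toward zero - so cast
# to a float dtype first; `square` simply multiplies each element by itself.
from mxnet import np

ints = np.array([1, 2, 4], dtype=np.int32)
np.reciprocal(ints.astype(np.float32))   # array([1.  , 0.5 , 0.25])
np.square(np.array([-2., 3.]))           # array([4., 9.])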
A tuple (possible only as a keyword argument)\n must have length equal to the number of outputs.\n where : ndarray, optional\n Values of True indicate to calculate the ufunc at that position,\n values of False indicate to leave the value in the output alone.\n\n Returns:\n -------\n y : ndarray\n The corresponding tangent values. This is a scalar if x is a scalar.\n\n Examples:\n ---------\n >>> np.tan(0.5)\n 0.5463024898437905\n \"\"\"\n\n return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef ceil(x, out=None, **kwargs):\n r\"\"\"\n Return the ceiling of the input, element-wise.\n The ceil of the ndarray `x` is the smallest integer `i`, such that\n `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The ceiling of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.ceil(a)\n array([-1., -1., -0., 1., 2., 2., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.ceil(np.array(3.5), a)\n array(4.)\n >>> a\n array(4.)\n \"\"\"\n return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef floor(x, out=None, **kwargs):\n r\"\"\"\n Return the floor of the input, element-wise.\n The floor of the ndarray `x` is the largest integer `i`, such that\n `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a same shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The floor of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.floor(a)\n array([-2., -2., -1., 0., 1., 1., 2.])\n >>> #if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.floor(np.array(3.5), a)\n array(3.)\n >>> a\n array(3.)\n \"\"\"\n return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef bitwise_not(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
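# Added sketch (not from the original source): `ceil` rounds toward +inf and `floor`
# toward -inf, as the doctests above show; both keep the floating point dtype.
from mxnet import np

a = np.array([-1.7, -0.2, 0.2, 1.5])
np.ceil(a)    # array([-1., -0.,  1.,  2.])
np.floor(a)   # array([-2., -1.,  0.,  1.])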
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef invert(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef trunc(x, out=None, **kwargs):\n r\"\"\"\n Return the truncated value of the input, element-wise.\n The truncated value of the scalar `x` is the nearest integer `i` which\n is closer to zero than `x` is. In short, the fractional part of the\n signed number `x` is discarded.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : ndarray or scalar\n The truncated value of each element in `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.trunc in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.trunc(a)\n array([-1., -1., -0., 0., 1., 1., 2.])\n \"\"\"\n return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef logical_not(x, out=None, **kwargs):\n r\"\"\"\n Compute the truth value of NOT x element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Logical NOT is applied to the elements of `x`.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : bool or ndarray of bool\n Boolean result with the same shape as `x` of the NOT operation\n on elements of `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n This function differs from the original numpy.logical_not in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> x= np.array([True, False, 0, 1])\n >>> np.logical_not(x)\n array([False, True, True, False])\n\n >>> x = np.arange(5)\n >>> np.logical_not(x<3)\n array([False, False, False, True, True])\n \"\"\"\n return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arcsinh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arcsinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arcsinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `arcsinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. DType of `out` must be same as the expected one.\n - Cannot broadcast automatically. 
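# Added sketch (not from the original source): `trunc` discards the fractional part
# (rounds toward zero), which differs from `floor` for negative inputs and from
# `rint` near .5 boundaries.
from mxnet import np

a = np.array([-1.7, -0.5, 0.5, 1.7])
np.trunc(a)   # array([-1., -0.,  0.,  1.])
np.floor(a)   # array([-2., -1.,  0.,  1.])
np.rint(a)    # rounds to the nearest integer instead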
Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arcsinh(a)\n array([1.8309381, 2.2924316])\n >>> np.arcsinh(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arccosh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arccosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arccosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `arccosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arccosh(a)\n array([1.8309381, 2.2924316])\n >>> np.arccosh(1)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef arctanh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arctanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `arctanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n - Do not support `where`, a parameter in numpy which indicates where to calculate.\n - Do not support complex-valued input.\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n - If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.arctanh(a)\n array([0., -0.54930615])\n >>> np.arctanh(0.0)\n 0.0\n \"\"\"\n return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef tile(A, reps):\n r\"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. 
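# Added sketch (not from the original source): on their real domains the inverse
# hyperbolic functions above undo their forward counterparts, e.g.
# arctanh(tanh(x)) == x for |x| < 1 and arccosh(cosh(x)) == x for x >= 0.
from mxnet import np

x = np.array([0.0, 0.5])
np.arctanh(np.tanh(x))    # recovers [0. , 0.5]
np.arccosh(np.cosh(x))    # recovers [0. , 0.5]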
So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : ndarray or scalar\n An input array or a scalar to repeat.\n reps : a single integer or tuple of integers\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0., 1., 2., 0., 1., 2.])\n >>> np.tile(a, (2, 2))\n array([[0., 1., 2., 0., 1., 2.],\n [0., 1., 2., 0., 1., 2.]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0., 1., 2., 0., 1., 2.]],\n [[0., 1., 2., 0., 1., 2.]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1., 2., 1., 2.],\n [3., 4., 3., 4.]])\n >>> np.tile(b, (2, 1))\n array([[1., 2.],\n [3., 4.],\n [1., 2.],\n [3., 4.]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.]])\n\n Scalar as input:\n\n >>> np.tile(2, 3)\n array([2, 2, 2]) # repeating integer `2`\n\n \"\"\"\n return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"\n Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n \"\"\"\n axis_size = ary.shape[axis]\n if isinstance(indices_or_sections, integer_types):\n sections = indices_or_sections\n if axis_size % sections:\n raise ValueError('array split does not result in an equal division')\n section_size = int(axis_size / sections)\n indices = [i * section_size for i in range(sections)]\n elif isinstance(indices_or_sections, (list, set, tuple)):\n indices = [0] + list(indices_or_sections)\n else:\n raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n ret = _npi.split(ary, indices, axis, False)\n assert isinstance(ret, list), 'Output of split should be list,' \\\n ' got a return type {}'.format(type(ret))\n return ret\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Split an array into multiple sub-arrays.\n\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. 
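# Added sketch (not from the original source): `split` above has no Examples section;
# an integer must divide the axis exactly, while a list of indices yields the slices
# ary[:2], ary[2:3], ary[3:] described in its docstring.
from mxnet import np

x = np.arange(6.0)
np.split(x, 3)        # [array([0., 1.]), array([2., 3.]), array([4., 5.])]
np.split(x, [2, 3])   # [array([0., 1.]), array([2.]), array([3., 4., 5.])]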
If such a split is not possible,\n an array of length l that should be split into n sections, it returns\n l % n sub-arrays of size l//n + 1 and the rest of size l//n.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D Python tuple, list or set.\n Param used to determine the number and size of the subarray.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> np.array_split(x, [3, 5, 6, 8])\n [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]\n\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n >>> x = np.arange(7.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]\n \"\"\"\n indices = []\n sections = 0\n if isinstance(indices_or_sections, integer_types):\n sections = indices_or_sections\n elif isinstance(indices_or_sections, (list, set, tuple)):\n indices = [0] + list(indices_or_sections)\n else:\n raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n ret = _npi.split(ary, indices, axis, False, sections)\n if not isinstance(ret, list):\n return [ret]\n return ret\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef hsplit(ary, indices_or_sections):\n \"\"\"Split an array into multiple sub-arrays horizontally (column-wise).\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int, list of ints or tuple of ints.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a list of sorted integers, the entries\n indicate where along `axis` the array is split.\n\n If an index exceeds the dimension of the array along `axis`,\n it will raises errors. so index must less than or euqal to\n the dimension of the array along axis.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Notes\n ------\n - If `indices_or_sections` is given as an integer, but a split\n does not result in equal division.It will raises ValueErrors.\n\n - If indices_or_sections is an integer, and the number is 1, it will\n raises an error. 
Because single output from split is not supported yet...\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, [3, 6])\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float32)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n\n If ``ary`` has one dimension, 'axis' = 0.\n >>> x = np.arange(4)\n array([0., 1., 2., 3.])\n >>> np.hsplit(x, 2)\n [array([0., 1.]), array([2., 3.])]\n\n If you want to produce an empty sub-array, you can see an example.\n >>> np.hsplit(x, [2, 2])\n [array([0., 1.]), array([], dtype=float32), array([2., 3.])]\n \"\"\"\n if len(ary.shape) < 1:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n indices = []\n sections = 0\n if isinstance(indices_or_sections, integer_types):\n sections = indices_or_sections\n elif isinstance(indices_or_sections, (list, set, tuple)):\n indices = [0] + list(indices_or_sections)\n else:\n raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')\n ret = _npi.hsplit(ary, indices, 1, False, sections)\n if not isinstance(ret, list):\n return [ret]\n return ret\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vsplit(ary, indices_or_sections):\n r\"\"\"\n vsplit(ary, indices_or_sections)\n\n Split an array into multiple sub-arrays vertically (row-wise).\n\n ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split\n along the first axis regardless of the array dimension.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 0. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 0 the array is split. 
For example, ``[2, 3]`` would result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along axis 0, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Notes\n -------\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n\n - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,\n an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n if len(ary.shape) < 2:\n raise ValueError(\"vsplit only works on arrays of 2 or more dimensions\")\n return split(ary, indices_or_sections, 0)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef dsplit(ary, indices_or_sections):\n \"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. `dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 2. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 2 the array is split. For example, ``[2, 3]`` would result in\n\n - ary[:, :, :2]\n - ary[:, :, 2:3]\n - ary[:, :, 3:]\n\n If an index exceeds the dimension of the array along axis 2, an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n \"\"\"\n if len(ary.shape) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\ndef concatenate(seq, axis=0, out=None):\n \"\"\"\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... 
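# --- Editorial usage sketch (not part of the original module) ---------------
# Shows how `vsplit`, `hsplit` and `dsplit` documented above are `split` with
# the axis fixed to 0, 1 and 2 respectively. The helper name and the
# `from mxnet import np` import are illustrative assumptions.
def _sketch_axis_specific_splits():
    from mxnet import np
    x = np.arange(8.0).reshape(2, 2, 2)
    top, bottom = np.vsplit(x, 2)    # same as np.split(x, 2, axis=0)
    left, right = np.hsplit(x, 2)    # same as np.split(x, 2, axis=1) for ndim > 1
    front, back = np.dsplit(x, 2)    # same as np.split(x, 2, axis=2)
    return (top, bottom), (left, right), (front, back)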
: sequence of ndarray\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concatenate((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n\n >>> np.concatenate((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n \"\"\"\n return _npi.concatenate(*seq, axis=axis, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef append(arr, values, axis=None): # pylint: disable=redefined-outer-name\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : ndarray\n Values are appended to a copy of this array.\n values : ndarray\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. Note that\n `append` does not occur in-place: a new array is allocated and\n filled. If `axis` is None, `out` is a flattened array.\n\n Examples\n --------\n >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))\n array([1., 2., 3., 4., 5., 6., 7., 8., 9.])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n \"\"\"\n return _npi.concatenate(arr, values, axis=axis, out=None)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef stack(arrays, axis=0, out=None):\n \"\"\"Join a sequence of arrays along a new axis.\n The axis parameter specifies the index of the new axis in the dimensions of the result.\n For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of ndarray\n Each array must have the same shape.\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n out : ndarray, optional\n If provided, the destination to place the result. 
The shape must be correct,\n matching that of what stack would have returned if no out argument were specified.\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.stack(*arrays, axis=axis, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vstack(arrays, out=None):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef row_stack(arrays):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
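# --- Editorial usage sketch (not part of the original module) ---------------
# Contrasts the joining helpers documented above: `concatenate` grows an
# existing axis, `stack` inserts a brand-new axis, and `vstack` is concatenation
# along axis 0 after promoting 1-D inputs to rows. The helper name and the
# `from mxnet import np` import are illustrative assumptions.
def _sketch_join_ops():
    from mxnet import np
    a = np.array([[1., 2.], [3., 4.]])
    b = np.array([[5., 6.], [7., 8.]])
    joined = np.concatenate((a, b), axis=0)   # shape (4, 2): existing axis 0 grows
    stacked = np.stack((a, b), axis=0)        # shape (2, 2, 2): a new leading axis
    rows = np.vstack((a, b))                  # same values as `joined` for 2-D inputs
    return joined.shape, stacked.shape, rows.shape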
The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n def get_list(arrays):\n if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):\n raise ValueError(\"expected iterable for arrays but got {}\".format(type(arrays)))\n return [arr for arr in arrays]\n\n arrays = get_list(arrays)\n return _npi.vstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Returns\n --------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _npi.column_stack(*tup)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hstack(arrays):\n \"\"\"\n Stack arrays in sequence horizontally (column wise).\n This is equivalent to concatenation along the second axis,\n except for 1-D arrays where it concatenates along the first axis.\n Rebuilds arrays divided by hsplit.\n This function makes most sense for arrays with up to 3 dimensions.\n For instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions concatenate,\n stack and block provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n Examples\n --------\n >>> from mxnet import np,npx\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.hstack((a,b))\n array([1., 2., 3., 2., 3., 4.])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.hstack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _npi.hstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef dstack(arrays):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n \"\"\"\n return _npi.dstack(*arrays)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef maximum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise maximum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\"\"\"\n return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef minimum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise minimum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\"\"\"\n return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef swapaxes(a, axis1, axis2):\n \"\"\"Interchange two axes of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis1 : int\n First axis.\n axis2 : int\n Second axis.\n\n Returns\n -------\n a_swapped : ndarray\n Swapped array. This is always a copy of the input array.\n \"\"\"\n return _npi.swapaxes(a, dim1=axis1, dim2=axis2)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef clip(a, a_min, a_max, out=None):\n \"\"\"clip(a, a_min, a_max, out=None)\n\n Clip (limit) the values in an array.\n Given an interval, values outside the interval are clipped to\n the interval edges. For example, if an interval of ``[0, 1]``\n is specified, values smaller than 0 become 0, and values larger\n than 1 become 1.\n\n Parameters\n ----------\n a : ndarray\n Array containing elements to clip.\n a_min : scalar or `None`\n Minimum value. If `None`, clipping is not performed on lower\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n a_max : scalar or `None`\n Maximum value. If `None`, clipping is not performed on upper\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n out : ndarray, optional\n The results will be placed in this array. It may be the input\n array for in-place clipping. `out` must be of the right shape\n to hold the output. 
Its type is preserved.\n\n Returns\n -------\n clipped_array : ndarray\n An array with the elements of `a`, but where values\n < `a_min` are replaced with `a_min`, and those > `a_max`\n with `a_max`.\n\n Notes\n -----\n ndarray `a_min` and `a_max` are not supported.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> np.clip(a, 1, 8)\n array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)\n >>> np.clip(a, 3, 6, out=a)\n array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)\n \"\"\"\n if a_min is None and a_max is None:\n raise ValueError('array_clip: must set either max or min')\n if a_min is None:\n a_min = float('-inf')\n if a_max is None:\n a_max = float('inf')\n return _npi.clip(a, a_min, a_max, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmax(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmax(a)\n array(5.)\n >>> np.argmax(a, axis=0)\n array([1., 1., 1.])\n >>> np.argmax(a, axis=1)\n array([2., 2.])\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0., 5., 2., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(1.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmax(a, axis=1, out=b)\n array([2., 2.])\n >>> b\n array([2., 2.])\n \"\"\"\n return _npi.argmax(a, axis=axis, keepdims=False, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef argmin(a, axis=None, out=None):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. 
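# --- Editorial usage sketch (not part of the original module) ---------------
# Illustrates the one-sided clipping behaviour implemented in `clip` above,
# where a `None` bound is mapped to -inf / +inf. The helper name and the
# `from mxnet import np` import are illustrative assumptions.
def _sketch_clip_one_sided():
    from mxnet import np
    a = np.arange(10)
    low_only = np.clip(a, 3, None)    # only the lower edge is enforced: [3, 3, 3, 3, 4, ..., 9]
    high_only = np.clip(a, None, 6)   # only the upper edge is enforced: [0, 1, ..., 6, 6, 6, 6]
    return low_only, high_only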
It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is the same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n Notes\n -----\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in\n the following aspects:\n\n - Input type does not support Python native iterables (list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmin(a)\n array(0.)\n >>> np.argmin(a, axis=0)\n array([0., 0., 0.])\n >>> np.argmin(a, axis=1)\n array([0., 0.])\n\n >>> b = np.arange(6)\n >>> b[2] = 0\n >>> b\n array([0., 1., 0., 3., 4., 5.])\n >>> np.argmin(b) # Only the first occurrence is returned.\n array(0.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmin(a, axis=1, out=b)\n array([0., 0.])\n >>> b\n array([0., 0.])\n \"\"\"\n return _npi.argmin(a, axis=axis, keepdims=False, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef average(a, axis=None, weights=None, returned=False, out=None):\n \"\"\"\n Compute the weighted average along the specified axis.\n\n Parameters\n --------\n a : ndarray\n Array containing data to be averaged.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to average a.\n The default, axis=None, will average over\n all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n New in version 1.7.0.\n If axis is a tuple of ints, averaging is\n performed on all of the axes specified in the tuple\n instead of a single axis or all the axes as before.\n weights : ndarray, optional\n An array of weights associated with the values in a, must have the same dtype as a.\n Each value in a contributes to the average according to its associated weight.\n The weights array can either be 1-D (in which case its length must be\n the size of a along the given axis) or of the same shape as a.\n If weights=None, then all data in a are assumed to have a weight equal to one.\n The 1-D calculation is: avg = sum(a * weights) / sum(weights)\n The only constraint on weights is that sum(weights) must not be 0.\n returned : bool, optional\n Default is False.\n If True, the tuple (average, sum_of_weights) is returned,\n otherwise only the average is returned.\n If weights=None, sum_of_weights is equivalent to\n the number of elements over which the average is taken.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n\n Returns\n --------\n retval, [sum_of_weights] : ndarray\n Return the average along the specified axis.\n When returned is True, return a tuple with the average as the first element\n and the sum of the weights as the second element. 
sum_of_weights is of the same type as retval.\n If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a.\n\n Raises\n --------\n MXNetError\n - When all weights along axis sum to zero.\n - When the length of 1D weights is not the same as the shape of a along axis.\n - When given 1D weights, the axis is not specified or is not int.\n - When the shape of weights and a differ, but weights are not 1D.\n\n See also\n --------\n mean\n\n Notes\n --------\n This function differs from the original `numpy.average`\n <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in\n the following way(s):\n\n - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens\n - Does not support complex dtype\n - The dtypes of a and weights must be the same\n - Integral a results in float32 returned dtype, not float64\n\n Examples\n --------\n >>> data = np.arange(1, 5)\n >>> data\n array([1., 2., 3., 4.])\n >>> np.average(data)\n array(2.5)\n >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))\n array(4.)\n >>> data = np.arange(6).reshape((3,2))\n >>> data\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n >>> weights = np.array([0.25, 0.75])\n array([0.25, 0.75])\n >>> np.average(data, axis=1, weights=weights)\n array([0.75, 2.75, 4.75])\n \"\"\"\n if weights is None:\n return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)\n else:\n return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"\n mean(a, axis=None, dtype=None, out=None, keepdims=None)\n Compute the arithmetic mean along the specified axis.\n Returns the average of the array elements.\n The average is taken over the flattened array by default, otherwise over the specified axis.\n Parameters\n ----------\n a : ndarray\n ndarray containing numbers whose mean is desired.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean. For integer inputs, the default is float32;\n for floating point inputs, it is the same as the input dtype.\n out : ndarray, optional\n Alternate output array in which to place the result. The default is None; if provided,\n it must have the same shape and type as the expected output\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result\n as dimensions with size one. With this option, the result will broadcast correctly\n against the input array.\n If the default value is passed, then keepdims will not be passed through to the mean\n method of sub-classes of ndarray, however any non-default value will be. 
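# --- Editorial usage sketch (not part of the original module) ---------------
# Checks the 1-D weighting rule quoted in the `average` docstring above,
# avg = sum(a * weights) / sum(weights), against a manual computation.
# The helper name and the `from mxnet import np` import are illustrative assumptions.
def _sketch_weighted_average():
    from mxnet import np
    data = np.arange(1, 11)
    weights = np.arange(10, 0, -1)
    avg = np.average(data, weights=weights)
    manual = (data * weights).sum() / weights.sum()
    return avg, manual   # both evaluate to 4.0 for this input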
If the sub-class\n method does not implement keepdims any exceptions will be raised.\n Returns\n -------\n m : ndarray, see dtype parameter above\n If out=None, returns a new array containing the mean values,\n otherwise a reference to the output array is returned.\n Notes\n -----\n This function differs from the original `numpy.mean\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in\n the following way(s):\n - only ndarray is accepted as valid input, python iterables or scalar is not supported\n - default data type for integer input is float32\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.mean(a)\n array(2.5)\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0,:] = 1.0\n >>> a[1,:] = 0.1\n >>> np.mean(a)\n array(0.55)\n >>> np.mean(a, dtype=np.float64)\n array(0.55)\n \"\"\"\n return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n ddof : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. 
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n In single precision, std() can be inaccurate:\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n array(0.45)\n >>> np.std(a, dtype=np.float64)\n array(0.45, dtype=float64)\n \"\"\"\n return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the variance along the specified axis.\n Returns the variance of the array elements, a measure of the spread of a\n distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the variance is computed. The default is to\n compute the variance of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a variance is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the variance. For arrays of integer type\n the default is `float32`; for arrays of float types it is the same as\n the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n ddof : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n ``N - ddof``, where ``N`` represents the number of elements. By\n default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `var` method of sub-classes of\n `ndarray`, however any non-default value will be. 
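# --- Editorial usage sketch (not part of the original module) ---------------
# Shows the `ddof` divisor described in the `std` docstring above: the default
# divides by N (population estimate), while ddof=1 divides by N - 1 (sample
# estimate). The helper name and the `from mxnet import np` import are
# illustrative assumptions.
def _sketch_std_ddof():
    from mxnet import np
    a = np.array([1., 2., 3., 4.])
    population = np.std(a)         # sqrt(5 / 4)  ~= 1.118
    sample = np.std(a, ddof=1)     # sqrt(5 / 3)  ~= 1.291
    return population, sample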
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If ``out=None``, returns a new array containing the variance;\n otherwise, a reference to the output array is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.var(a)\n array(1.25)\n >>> np.var(a, axis=0)\n array([1., 1.])\n >>> np.var(a, axis=1)\n array([0.25, 0.25])\n\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.var(a)\n array(0.2025)\n >>> np.var(a, dtype=np.float64)\n array(0.2025, dtype=float64)\n >>> ((1-0.55)**2 + (0.1-0.55)**2)/2\n 0.2025\n \"\"\"\n return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.ndarray.numpy')\ndef indices(dimensions, dtype=_np.int32, ctx=None):\n \"\"\"Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0,1,...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : data-type, optional\n The desired data-type for the array. Default is `float32`.\n ctx : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.context.current_context()`.\n\n Returns\n -------\n grid : ndarray\n The array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n\n Notes\n -----\n The output shape is obtained by prepending the number of dimensions\n in front of the tuple of dimensions, i.e. if `dimensions` is a tuple\n ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N,r0,...,rN-1)``.\n\n The subarrays ``grid[k]`` contains the N-D array of indices along the\n ``k-th`` axis. Explicitly::\n\n grid[k,i0,i1,...,iN-1] = ik\n\n Examples\n --------\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]])\n >>> grid[1] # column indices\n array([[0, 0, 0],\n [1, 1, 1]], dtype=int32)\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0., 1., 2.],\n [4., 5., 6.]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n \"\"\"\n if isinstance(dimensions, (tuple, list)):\n if ctx is None:\n ctx = current_context()\n return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)\n else:\n raise ValueError(\"The dimensions must be sequence of ints\")\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef copysign(x1, x2, out=None, **kwargs):\n r\"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n If `x2` is a scalar, its sign will be copied to all elements of `x1`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Values to change the sign of.\n x2 : ndarray or scalar\n The sign of `x2` is copied to `x1`.\n out : ndarray or None, optional\n A location into which the result is stored. It must be of the\n right shape and right type to hold the output. 
If not provided\n or `None`,a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The values of `x1` with the sign of `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -------\n This function differs from the original `numpy.copysign\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in\n the following aspects:\n\n - ``where`` param is not supported.\n\n Examples\n --------\n >>> np.copysign(1.3, -1)\n -1.3\n >>> 1/np.copysign(0, 1)\n inf\n >>> 1/np.copysign(0, -1)\n -inf\n\n >>> a = np.array([-1, 0, 1])\n >>> np.copysign(a, -1.1)\n array([-1., -0., -1.])\n >>> np.copysign(a, np.arange(3)-1)\n array([-1., 0., 1.])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef ravel(x, order='C'):\n r\"\"\"\n ravel(x)\n\n Return a contiguous flattened array.\n A 1-D array, containing the elements of the input, is returned. A copy is\n made only if needed.\n\n Parameters\n ----------\n x : ndarray\n Input array. The elements in `x` are read in row-major, C-style order and\n packed as a 1-D array.\n order : `C`, optional\n Only support row-major, C-style order.\n\n Returns\n -------\n y : ndarray\n y is an array of the same subtype as `x`, with shape ``(x.size,)``.\n Note that matrices are special cased for backward compatibility, if `x`\n is a matrix, then y is a 1-D ndarray.\n\n Notes\n -----\n This function differs from the original numpy.arange in the following aspects:\n - Only support row-major, C-style order.\n\n Examples\n --------\n It is equivalent to ``reshape(x, -1)``.\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6]])\n >>> print(np.ravel(x))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(x.reshape(-1))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(np.ravel(x.T))\n [1. 4. 2. 5. 3. 6.]\n \"\"\"\n if order != 'C':\n raise NotImplementedError('order {} is not supported'.format(order))\n if isinstance(x, numeric_types):\n return _np.reshape(x, -1)\n elif isinstance(x, NDArray):\n return _npi.reshape(x, -1)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\ndef unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name\n \"\"\"\n Converts a flat index or array of flat indices into a tuple of coordinate arrays.\n\n Parameters:\n -------------\n indices : array_like\n An integer array whose elements are indices into the flattened version of an array of dimensions shape.\n Before version 1.6.0, this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling indices.\n\n Returns:\n -------------\n unraveled_coords : ndarray\n Each row in the ndarray has the same shape as the indices array.\n Each column in the ndarray represents the unravelled index\n\n Examples:\n -------------\n >>> np.unravel_index([22, 41, 37], (7,6))\n ([3. 6. 6.]\n [4. 5. 
1.])\n >>> np.unravel_index(1621, (6,7,8,9))\n (3, 1, 4, 1)\n \"\"\"\n if order == 'C':\n if isinstance(indices, numeric_types):\n return _np.unravel_index(indices, shape)\n ret = _npi.unravel_index_fallback(indices, shape=shape)\n ret_list = []\n for item in ret:\n ret_list += [item]\n return tuple(ret_list)\n else:\n raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')\n\n\ndef diag_indices_from(arr):\n \"\"\"\n This returns a tuple of indices that can be used to access the main diagonal of an array\n a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is\n the usual diagonal, for a.ndim > 2 this is the set of indices to access\n a[i, i, ..., i] for i = [0..n-1].\n\n Parameters:\n -------------\n arr : ndarray\n Input array for acessing the main diagonal. All dimensions\n should have equal length.\n\n Return:\n -------------\n diag: tuple of ndarray\n indices of the main diagonal.\n\n Examples:\n -------------\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> idx = np.diag_indices_from(a)\n >>> idx\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a[idx] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n \"\"\"\n return tuple(_npi.diag_indices_from(arr))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hanning(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hamming\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n http://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,\n 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,\n 0.07937312, 0. 
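# --- Editorial usage sketch (not part of the original module) ---------------
# Spells out the row-major rule behind the `unravel_index` example above:
# for shape (7, 6), flat = row * 6 + col, so 22 -> (3, 4), 41 -> (6, 5), 37 -> (6, 1).
# The helper name, the int32 dtype choice and the `from mxnet import np` import
# are illustrative assumptions.
def _sketch_unravel_index():
    from mxnet import np
    flat = np.array([22, 41, 37], dtype='int32')   # integer indices, as the docstring requires
    rows, cols = np.unravel_index(flat, (7, 6))
    return rows, cols   # expected: [3, 6, 6] and [4, 5, 1]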
])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hanning(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.hanning(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef hamming(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the hamming window.\n\n The hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n blackman, hanning\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,\n 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,\n 0.15302327, 0.08000001])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hamming(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"hamming window\")\n Text(0.5, 1.0, 'hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.hamming(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef blackman(M, dtype=_np.float32, ctx=None):\n r\"\"\"Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. 
It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is `float32`. Note that you need\n select numpy.float32 or float64 in this operator.\n ctx : Context, optional\n An optional device context (default is the current default context).\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n\n See Also\n --------\n hamming, hanning\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/{M-1}) + 0.08 \\cos(4\\pi n/{M-1})\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.\n\n Examples\n --------\n >>> np.blackman(12)\n array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,\n 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,\n 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.blackman(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"blackman window\")\n Text(0.5, 1.0, 'blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n if ctx is None:\n ctx = current_context()\n return _npi.blackman(M, dtype=dtype, ctx=ctx)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flip(m, axis=None, out=None):\n r\"\"\"\n flip(m, axis=None, out=None)\n\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n Parameters\n ----------\n m : ndarray or scalar\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n out : ndarray or scalar, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n out : ndarray or scalar\n A view of `m` with the entries of axis reversed. 
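# --- Editorial usage sketch (not part of the original module) ---------------
# Recomputes the Blackman window from the closed form given in the notes above,
# w(n) = 0.42 - 0.5*cos(2*pi*n/(M-1)) + 0.08*cos(4*pi*n/(M-1)), for comparison
# with `blackman(M)`. The helper name and the `from mxnet import np` import are
# illustrative assumptions.
def _sketch_blackman_from_formula():
    import math
    from mxnet import np
    M = 12
    window = np.blackman(M)
    n = np.arange(M)
    manual = (0.42
              - 0.5 * np.cos(2 * math.pi * n / (M - 1))
              + 0.08 * np.cos(4 * math.pi * n / (M - 1)))
    return window, manual   # elementwise close up to float32 rounding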
Since a view is\n returned, this operation is done in constant time.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(m, numeric_types):\n return _np.flip(m, axis)\n elif isinstance(m, ndarray):\n return _npi.flip(m, axis, out=out)\n else:\n raise TypeError('type {} not supported'.format(str(type(m))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef flipud(m):\n r\"\"\"\n flipud(*args, **kwargs)\n\n Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction.\n Rows are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array.\n\n Returns\n -------\n out : array_like\n A view of `m` with the rows reversed. Since a view is\n returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n fliplr : Flip array in the left/right direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[::-1,...]``.\n Does not require the array to be two-dimensional.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.0, 2, 3]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.flipud(A)\n array([[0., 0., 3.],\n [0., 2., 0.],\n [1., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.flipud(A) == A[::-1,...])\n array(True)\n\n >>> np.flipud(np.array([1,2]))\n array([2., 1.])\n \"\"\"\n return flip(m, 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef fliplr(m):\n r\"\"\"\n fliplr(*args, **kwargs)\n\n Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction.\n Columns are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array, must be at least 2-D.\n\n Returns\n -------\n f : ndarray\n A view of `m` with the columns reversed. Since a view\n is returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n flipud : Flip array in the up/down direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to m[:,::-1]. Requires the array to be at least 2-D.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.,2.,3.]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.fliplr(A)\n array([[0., 0., 1.],\n [0., 2., 0.],\n [3., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.fliplr(A) == A[:,::-1,...])\n array(True)\n \"\"\"\n return flip(m, 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef around(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n around(x, decimals=0, out=None)\n\n Evenly round to the given number of decimals.\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : ndarray, optional\n Alternative output array in which to place the result. 
It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n rounded_array : ndarray or scalar\n An array of the same type as `x`, containing the rounded values.\n A reference to the result is returned.\n\n Notes\n -----\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc.\n\n This function differs from the original numpy.prod in the following aspects:\n\n - Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n - Cannot support complex-valued number.\n\n Examples\n --------\n >>> np.around([0.37, 1.64])\n array([ 0., 2.])\n >>> np.around([0.37, 1.64], decimals=1)\n array([ 0.4, 1.6])\n >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned\n array([ 1, 2, 3, 11])\n >>> np.around([1, 2, 3, 11], decimals=-1)\n array([ 0, 0, 0, 10])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _npi.around(x, decimals, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef round(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round_(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n from ...numpy import ndarray\n if isinstance(x, numeric_types):\n return _np.around(x, decimals, **kwargs)\n elif isinstance(x, ndarray):\n return _npi.around(x, decimals, out=out, **kwargs)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef arctan2(x1, x2, out=None, **kwargs):\n r\"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if\n `x1` and `x2` are scalars.\n\n Notes\n -----\n *arctan2* is identical to the `atan2` function of the underlying\n C library. 
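# --- Editorial usage sketch (not part of the original module) ---------------
# Demonstrates the round-half-to-even behaviour described for `around` above.
# The helper name and the `from mxnet import np` import are illustrative assumptions.
def _sketch_round_half_to_even():
    from mxnet import np
    halves = np.array([0.5, 1.5, 2.5, 3.5, 4.5])
    rounded = np.around(halves)                              # ties go to the nearest even value: [0., 2., 2., 4., 4.]
    tenths = np.around(np.array([0.37, 1.64]), decimals=1)   # [0.4, 1.6]
    return rounded, tenths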
The following special values are defined in the C\n standard: [1]_\n\n ====== ====== ================\n `x1` `x2` `arctan2(x1,x2)`\n ====== ====== ================\n +/- 0 +0 +/- 0\n +/- 0 -0 +/- pi\n > 0 +/-inf +0 / +pi\n < 0 +/-inf -0 / -pi\n +/-inf +inf +/- (pi/4)\n +/-inf -inf +/- (3*pi/4)\n ====== ====== ================\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arange in the following aspects:\n - Only support float16, float32 and float64.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.arctan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.arctan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,\n _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef hypot(x1, x2, out=None, **kwargs):\n r\"\"\"\n Given the \"legs\" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n\n Parameters\n ----------\n x1, x2 : ndarray\n Leg of the triangle(s).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n z : ndarray\n The hypotenuse of the triangle(s).\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n This function differs from the original numpy.arange in the following aspects:\n - Only support float16, float32 and float64.\n\n Examples\n --------\n >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> np.hypot(3*np.ones((3, 3)), [4])\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_and(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_and(13, 17)\n 1\n\n >>> np.bitwise_and(14, 13)\n 12\n >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)\n array([12, 1], dtype=int32)\n\n >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))\n array([0, 1], dtype=int32)\n >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))\n array([ 2, 4, 16], dtype=int32)\n >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_xor(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_xor(13, 17)\n 28\n\n >>> np.bitwise_xor(31, 5)\n 26\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)\n array([26, 6])\n\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([26, 5])\n >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef bitwise_or(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise OR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_or(13, 17)\n 29\n\n >>> np.bitwise_or(31, 5)\n 31\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)\n array([31, 7])\n\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([31, 7])\n >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_binary_func\ndef ldexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns x1 * 2**x2, element-wise.\n The mantissas `x1` and twos exponents `x2` are used to construct\n floating point numbers ``x1 * 2**x2``.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. 
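# --- Editorial usage sketch (not part of the original module) ---------------
# Exercises the three bit-wise operators defined above on the same int32 inputs
# and checks the identity (a & b) + (a ^ b) == (a | b), which holds because the
# AND and XOR results occupy disjoint bits. The helper name and the
# `from mxnet import np` import are illustrative assumptions.
def _sketch_bitwise_identity():
    from mxnet import np
    a = np.array([31, 3], dtype='int32')
    b = np.array([5, 6], dtype='int32')
    both = np.bitwise_and(a, b)    # [5, 2]
    either = np.bitwise_or(a, b)   # [31, 7]
    differ = np.bitwise_xor(a, b)  # [26, 5]
    return both + differ, either   # the two results are elementwise equal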
If provided, it must have\n a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The result of ``x1 * 2**x2``.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n Complex dtypes are not supported, they will raise a TypeError.\n Different from numpy, we allow x2 to be float besides int.\n `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n more clear to simply use the expression ``x1 * 2**x2``.\n\n Examples\n --------\n >>> np.ldexp(5, np.arange(4))\n array([ 5., 10., 20., 40.])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef inner(a, b):\n r\"\"\"\n Inner product of two arrays.\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : ndarray\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n np.inner(a, b) = sum(a[:]*b[:])\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n or explicitly::\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n In addition `a` or `b` may be scalars, in which case::\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n A multidimensional example:\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n \"\"\"\n return tensordot(a, b, [-1, -1])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef outer(a, b):\n r\"\"\"\n Compute the outer product of two vectors.\n Given two vectors, ``a = [a0, a1, ..., aM]`` and\n ``b = [b0, b1, ..., bN]``,\n the outer product [1]_ is::\n [[a0*b0 a0*b1 ... a0*bN ]\n [a1*b0 .\n [ ... .\n [aM*b0 aM*bN ]]\n\n Parameters\n ----------\n a : (M,) ndarray\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) ndarray\n Second input vector. Input is flattened if\n not already 1-dimensional.\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to N dimensions and other operations.\n ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.\n References\n ----------\n .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 
8.\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 0)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef vdot(a, b):\n r\"\"\"\n Return the dot product of two vectors.\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : ndarray\n First argument to the dot product.\n b : ndarray\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n Note that higher-dimensional arrays are flattened!\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n 30\n >>> np.vdot(b, a)\n 30\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 1)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef equal(x1, x2, out=None):\n \"\"\"\n Return (x1 == x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n Examples\n --------\n >>> np.equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.equal(1, np.ones(1))\n array([ True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef not_equal(x1, x2, out=None):\n \"\"\"\n Return (x1 != x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.not_equal(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 > x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,\n _npi.less_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef less(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 < x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.less(1, np.ones(1))\n array([False])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef greater_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 >= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,\n _npi.less_equal_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef less_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 <= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less_equal(np.ones(2, 1)), np.zeros(1, 3))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,\n _npi.greater_equal_scalar, out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n Rotation direction is from the first towards the second axis.\n Parameters\n ----------\n m : ndarray\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n -----\n rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))\n rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], 'int')\n >>> m\n array([[1, 2],\n [3, 4]], dtype=int64)\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]], dtype=int64)\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]], dtype=int64)\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1., 3.],\n [0., 2.]],\n\n [[5., 7.],\n [4., 6.]]])\n \"\"\"\n return _npi.rot90(m, k=k, axes=axes)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef einsum(*operands, **kwargs):\n r\"\"\"\n einsum(subscripts, *operands, out=None, optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n 
Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of ndarray\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n optimize : {False, True}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False. Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. `einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`np.trace`.\n * Return a diagonal, :py:func:`np.diag`.\n * Array axis summations, :py:func:`np.sum`.\n * Transpositions and permutations, :py:func:`np.transpose`.\n * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.\n * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.\n * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.\n * Tensor contractions, :py:func:`np.tensordot`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)`` produces a\n view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``\n describes traditional matrix multiplication and is equivalent to\n :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one\n operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent\n to :py:func:`np.trace(a) <np.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,\n and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. 
Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view.\n\n The ``optimize`` argument which will optimize the contraction order\n of an einsum expression. For a contraction with three or more operands this\n can greatly increase the computational efficiency at the cost of a larger\n memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. 'optimal' is not supported\n for now.\n\n This function differs from the original `numpy.einsum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in\n the following way(s):\n\n - Does not support 'optimal' strategy\n - Does not support the alternative subscript like\n `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`\n - Does not produce view in any cases\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n array(60.)\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0., 6., 12., 18., 24.])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10., 35., 60., 85., 110.])\n >>> np.sum(a, axis=1)\n array([ 10., 35., 60., 85., 110.])\n\n For higher dimensional arrays summing a single axis can be done with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10., 35., 60., 85., 110.])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.einsum('ij->ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.transpose(c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n array(30.)\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.dot(a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.einsum('...j,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.einsum(',ij', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.multiply(3, c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0., 1., 2., 3., 4.],\n [0., 2., 4., 6., 8.]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> 
np.einsum('k...,jk', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path. Performance\n improvements can be particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n # Greedy `einsum` (faster optimal path approximation): ~0.117ms\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)\n \"\"\"\n # Grab non-einsum kwargs; do not optimize by default.\n optimize_arg = kwargs.pop('optimize', False)\n out = kwargs.pop('out', None)\n\n subscripts = operands[0]\n operands = operands[1:]\n return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. The values in `a` are always returned in\n row-major, C-style order.\n\n To group the indices by element, rather than dimension, use `argwhere`,\n which returns a row for each non-zero element.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n ndarray.nonzero :\n Equivalent ndarray method.\n\n Notes\n -----\n While the nonzero values can be obtained with ``a[nonzero(a)]``, it is\n recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which\n will correctly handle 0-d arrays.\n\n Examples\n --------\n >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])\n >>> x\n array([[3, 0, 0],\n [0, 4, 0],\n [5, 6, 0]], dtype=int32)\n >>> np.nonzero(x)\n (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))\n\n >>> x[np.nonzero(x)]\n array([3, 4, 5, 6])\n >>> np.transpose(np.stack(np.nonzero(x)))\n array([[0, 0],\n [1, 1],\n [2, 0],\n [2, 1]], dtype=int64)\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. 
Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)\n >>> a > 3\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> np.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n\n Using this result to index `a` is equivalent to using the mask directly:\n\n >>> a[np.nonzero(a > 3)]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n >>> a[a > 3]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n\n ``nonzero`` can also be called as a method of the array.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n \"\"\"\n out = _npi.nonzero(a).transpose()\n return tuple([out[i] for i in range(len(out))])\n\n\n@set_module('mxnet.ndarray.numpy')\ndef percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th percentile of the data along the specified axis.\n Returns the q-th percentile(s) of the array elements.\n\n Parameters\n ----------\n a : ndarray\n Input array\n q : ndarray\n Percentile or sequence of percentiles to compute.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the percentiles are computed. The default is to\n compute the percentile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have the same\n shape and buffer length as the expected output, but the type (of the output)\n will be cast if necessary.\n overwrite_input : bool, optional (Not supported yet)\n If True, then allow the input array a to be modified by intermediate calculations,\n to save memory. In this case, the contents of the input a after this function\n completes is undefined.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use when the\n desired percentile lies between two data points i < j:\n 'linear': i + (j - i) * fraction, where fraction is the fractional part of the\n index surrounded by i and j.\n 'lower': i.\n 'higher': j.\n 'nearest': i or j, whichever is nearest.\n 'midpoint': (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as\n dimensions with size one. 
With this option, the result will broadcast\n correctly against the original array a.\n\n Returns\n -------\n percentile : scalar or ndarray\n Output array.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.percentile(a, np.array(50))\n array(3.5)\n >>> np.percentile(a, np.array(50), axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.percentile(a, np.array(50), axis=1)\n array([7., 2.])\n >>> np.percentile(a, np.array(50), axis=1, keepdims=True)\n array([[7.],\n [2.]])\n\n >>> m = np.percentile(a, np.array(50), axis=0)\n >>> out = np.zeros_like(m)\n >>> np.percentile(a, np.array(50), axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n if isinstance(q, numeric_types):\n return _npi.percentile(a, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=q, out=out)\n return _npi.percentile(a, q, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=None, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th quantile of the data along the specified axis.\n New in version 1.15.0.\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n q : ndarray\n Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the quantiles are computed.\n The default is to compute the quantile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result.\n It must have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use\n when the desired quantile lies between two data points i < j:\n linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.\n lower: i.\n higher: j.\n nearest: i or j, whichever is nearest.\n midpoint: (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the original array a.\n Returns\n -------\n quantile : ndarray\n If q is a single quantile and axis=None, then the result is a scalar.\n If multiple quantiles are given, first axis of the result corresponds to the quantiles.\n The other axes are the axes that remain after the reduction of a.\n If out is specified, that array is returned instead.\n See also\n --------\n mean\n Notes\n -----\n Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum\n to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors\n as well as the interpolation parameter will determine the quantile if the normalized ranking\n does not match the location of q exactly. 
This function is the same as the median if q=0.5,\n the same as the minimum if q=0.0 and the same as the maximum if q=1.0.\n This function differs from the original `numpy.quantile\n <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in\n the following aspects:\n - q must be ndarray type even if it is a scalar\n - do not support overwrite_input\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10., 7., 4.],\n [3., 2., 1.]])\n >>> q = np.array(0.5)\n >>> q\n array(0.5)\n >>> np.quantile(a, q)\n array(3.5)\n >>> np.quantile(a, q, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.quantile(a, q, axis=1)\n array([7., 2.])\n >>> np.quantile(a, q, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n >>> m = np.quantile(a, q, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.quantile(a, q, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> out\n array([6.5, 4.5, 2.5])\n \"\"\"\n if overwrite_input is not None:\n raise NotImplementedError('overwrite_input is not supported yet')\n if isinstance(q, numeric_types):\n return _npi.percentile(a, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=q * 100, out=out)\n return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,\n keepdims=keepdims, q_scalar=None, out=out)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef shares_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n This function differs from the original `numpy.shares_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `may_share_memory` in MXNet DeepNumPy\n \"\"\"\n return _npi.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef may_share_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n This function differs from the original `numpy.may_share_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in\n the following way(s):\n\n - Does not support `max_work`, it is a dummy argument\n - Actually it is same as `shares_memory` in MXNet DeepNumPy\n \"\"\"\n return _npi.share_memory(a, b).item()\n\n\n@set_module('mxnet.ndarray.numpy')\ndef diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. 
If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n \"\"\"\n if (prepend or append):\n raise NotImplementedError('prepend and append options are not supported yet')\n return _npi.diff(a, n=n, axis=axis)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef resize(a, new_shape):\n \"\"\"\n Return a new array with the specified shape.\n If the new array is larger than the original array, then the new\n array is filled with repeated copies of `a`. Note that this behavior\n is different from a.resize(new_shape) which fills with zeros instead\n of repeated copies of `a`.\n\n Parameters\n ----------\n a : ndarray\n Array to be resized.\n new_shape : int or tuple of int\n Shape of resized array.\n\n Returns\n -------\n reshaped_array : ndarray\n The new array is formed from the data in the old array, repeated\n if necessary to fill out the required number of elements. The\n data are repeated in the order that they are stored in memory.\n\n See Also\n --------\n ndarray.resize : resize an array in-place.\n\n Notes\n -----\n Warning: This functionality does **not** consider axes separately,\n i.e. it does not apply interpolation/extrapolation.\n It fills the return array with the required number of elements, taken\n from `a` as they are laid out in memory, disregarding strides and axes.\n (This is in case the new shape is smaller. 
For larger, see above.)\n This functionality is therefore not suitable to resize images,\n or data where each axis represents a separate and distinct entity.\n\n Examples\n --------\n >>> a = np.array([[0, 1], [2, 3]])\n >>> np.resize(a, (2, 3))\n array([[0., 1., 2.],\n [3., 0., 1.]])\n >>> np.resize(a, (1, 4))\n array([[0., 1., 2., 3.]])\n >>> np.resize(a,(2, 4))\n array([[0., 1., 2., 3.],\n [0., 1., 2., 3.]])\n \"\"\"\n return _npi.resize_fallback(a, new_shape=new_shape)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):\n \"\"\"\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : ndarray\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n nan : int, float, optional\n Value to be used to fill NaN values. If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n .. versionadded:: 1.13\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). 
This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,\n 1.2800000e+02])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,\n 1.2800000e+02])\n >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype=\"float64\")/0\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y)\n array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],\n [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n \"\"\"\n if isinstance(x, numeric_types):\n return _np.nan_to_num(x, copy, nan, posinf, neginf)\n elif isinstance(x, NDArray):\n if x.dtype in ['int8', 'uint8', 'int32', 'int64']:\n return x\n if not copy:\n return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)\n return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)\n else:\n raise TypeError('type {} not supported'.format(str(type(x))))\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isnan(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for NaN and return result as a boolean array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is NaN, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n\n This function differs from the original `numpy.isinf\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isnan(np.nan)\n True\n >>> np.isnan(np.inf)\n False\n >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))\n array([ True, False, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive or negative infinity.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive or negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n This function differs from the original `numpy.isnan\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n - Does not support complex number for now\n - Input type does not support Python native iterables(list, tuple, ...).\n - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.\n - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.\n - ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isinf(np.inf)\n True\n >>> np.isinf(np.nan)\n False\n >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))\n array([ True, True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool_)\n >>> np.isinf(x, y)\n array([ True, False, True])\n >>> y\n array([ True, False, True])\n \"\"\"\n return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)\n\n\n@wrap_np_unary_func\ndef isposinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive infinity, return result as bool array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isposinf(np.inf)\n True\n >>> np.isposinf(-np.inf)\n False\n >>> np.isposinf(np.nan)\n False\n >>> np.isposinf(np.array([-np.inf, 0., np.inf]))\n array([False, False, True])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isposinf(x, y)\n array([False, False, True])\n >>> y\n array([False, False, True])\n \"\"\"\n return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isneginf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for negative infinity, return result as bool array.\n\n Parameters\n ----------\n x : 
ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isneginf(-np.inf)\n True\n >>> np.isneginf(np.inf)\n False\n >>> np.isneginf(float('-inf'))\n True\n >>> np.isneginf(np.array([-np.inf, 0., np.inf]))\n array([ True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isneginf(x, y)\n array([ True, False, False])\n >>> y\n array([ True, False, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isfinite(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for finiteness (not infinity or not Not a Number).\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n Not a Number, positive infinity and negative infinity are considered to be non-finite.\n\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n Also that positive infinity is not equivalent to negative infinity.\n But infinity is equivalent to positive infinity. Errors result if the second argument\n is also supplied when x is a scalar input, or if first and second arguments have different shapes.\n\n Examples\n --------\n >>> np.isfinite(1)\n True\n >>> np.isfinite(0)\n True\n >>> np.isfinite(np.nan)\n False\n >>> np.isfinite(np.inf)\n False\n >>> np.isfinite(-np.inf)\n False\n >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))\n array([False, True, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isfinite(x, y)\n array([False, True, False])\n >>> y\n array([False, True, False])\n \"\"\"\n return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\ndef where(condition, x=None, y=None): # pylint: disable=too-many-return-statements\n \"\"\"where(condition, [x, y])\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. The rest of this documentation\n covers only the case where all three arguments are provided.\n\n Parameters\n ----------\n condition : ndarray\n Where True, yield `x`, otherwise yield `y`.\n x, y : ndarray\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape. 
`x` and `y` must have the same dtype.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n This function differs from the original `numpy.where\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in\n the following way(s):\n\n - If `condition` is a scalar, this operator returns x or y directly without broadcasting.\n - If `condition` is ndarray, while both `x` and `y` are scalars,\n the output dtype will be `float32`.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.where(a < 5, a, 10*a)\n array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])\n\n This can be used on multidimensional arrays too:\n\n >>> cond = np.array([[True, False], [True, True]])\n >>> x = np.array([[1, 2], [3, 4]])\n >>> y = np.array([[9, 8], [7, 6]])\n >>> np.where(cond, x, y)\n array([[1., 8.],\n [3., 4.]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = onp.ogrid[:3, :4]\n >>> x = np.array(x)\n >>> y = np.array(y)\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]], dtype=int64)\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0., 1., 2.],\n [ 0., 2., -1.],\n [ 0., 3., -1.]])\n \"\"\"\n if x is None and y is None:\n return nonzero(condition)\n else:\n if isinstance(condition, numeric_types):\n if condition != 0:\n return x\n else:\n return y\n else:\n if isinstance(x, numeric_types) and isinstance(y, numeric_types):\n return _npi.where_scalar2(condition, float(x), float(y), out=None)\n elif isinstance(x, NDArray) and isinstance(y, NDArray):\n return _npi.where(condition, x, y, out=None)\n elif isinstance(y, NDArray):\n return _npi.where_lscalar(condition, y, float(x), out=None)\n elif isinstance(x, NDArray):\n return _npi.where_rscalar(condition, x, float(y), out=None)\n else:\n raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))\n\n\n@set_module('mxnet.ndarray.numpy')\ndef polyval(p, x):\n \"\"\"\n Evaluate a polynomial at specific values.\n If p is of length N, this function returns the value:\n p[0]*x**(N-1) + p[1]*x**(N-2) + ... 
+ p[N-2]*x + p[N-1]\n If x is a sequence, then p(x) is returned for each element of x.\n If x is another polynomial then the composite polynomial p(x(t)) is returned.\n\n Parameters\n ----------\n p : ndarray\n 1D array of polynomial coefficients (including coefficients equal to zero)\n from highest degree to the constant term.\n x : ndarray\n An array of numbers, at which to evaluate p.\n\n Returns\n -------\n values : ndarray\n Result array of polynomials\n\n Notes\n -----\n This function differs from the original `numpy.polyval\n <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in\n the following way(s):\n - Does not support poly1d.\n - X should be ndarray type even if it contains only one element.\n\n Examples\n --------\n >>> p = np.array([3, 0, 1])\n array([3., 0., 1.])\n >>> x = np.array([5])\n array([5.])\n >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1\n array([76.])\n >>> x = np.array([5, 4])\n array([5., 4.])\n >>> np.polyval(p, x)\n array([76., 49.])\n \"\"\"\n from ...numpy import ndarray\n if isinstance(p, ndarray) and isinstance(x, ndarray):\n return _npi.polyval(p, x)\n elif not isinstance(p, ndarray) and not isinstance(x, ndarray):\n return _np.polyval(p, x)\n else:\n raise TypeError('type not supported')\n\n\n@set_module('mxnet.ndarray.numpy')\ndef bincount(x, weights=None, minlength=0):\n \"\"\"\n Count number of occurrences of each value in array of non-negative ints.\n\n Parameters\n ----------\n x : ndarray\n input array, 1 dimension, nonnegative ints.\n weights: ndarray\n input weigths same shape as x. (Optional)\n minlength: int\n A minimum number of bins for the output. (Optional)\n\n Returns\n --------\n out : ndarray\n the result of binning the input array. The length of out is equal to amax(x)+1.\n\n Raises\n --------\n Value Error\n If the input is not 1-dimensional, or contains elements with negative values,\n or if minlength is negative\n TypeError\n If the type of the input is float or complex.\n\n Examples\n --------\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: array cannot be safely cast to required type\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n \"\"\"\n if not isinstance(x, NDArray):\n raise TypeError(\"Input data should be NDarray\")\n if minlength < 0:\n raise ValueError(\"Minlength value should greater than 0\")\n if weights is None:\n return _npi.bincount(x, minlength=minlength, has_weights=False)\n return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)\n" ]
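The einsum docstring inside the code field above works through several identities (trace, diagonal extraction, matrix-vector product). The following is a minimal sketch in plain NumPy, not the MXNet `_npi.einsum` wrapper defined above, that checks those identities with the same array shapes the docstring uses:

import numpy as np

a = np.arange(25).reshape(5, 5)
b = np.arange(5)

# A label repeated on one operand ('ii') sums the diagonal: the trace.
assert np.einsum('ii', a) == np.trace(a)

# Explicit mode 'ii->i' keeps the diagonal instead of summing it.
assert np.array_equal(np.einsum('ii->i', a), np.diag(a))

# 'ij,j' contracts the shared label j: an ordinary matrix-vector product.
assert np.array_equal(np.einsum('ij,j', a, b), a.dot(b))

These are the same three calls the docstring exercises against the MXNet backend; here they only confirm the NumPy semantics being mirrored.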
[ [ "numpy.unravel_index", "numpy.reshape", "numpy.around", "numpy.nan_to_num", "numpy.isscalar", "numpy.polyval", "numpy.flip" ] ]
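Of the APIs tallied in the list above, numpy.polyval also shows up in the embedded docstring example. A quick plain-NumPy sanity check of that documented arithmetic (standard NumPy here, not the MXNet fallback path):

import numpy as np

# 3 * 5**2 + 0 * 5**1 + 1, matching the polyval example in the docstring above.
assert np.polyval([3, 0, 1], 5) == 76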
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
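The quantile wrapper in the record above simply forwards `q * 100` to the same percentile kernel. A short check with plain NumPy (an illustration of the underlying identity, not the MXNet code path) of why that scaling is all that is needed:

import numpy as np

a = np.array([[10., 7., 4.], [3., 2., 1.]])

# The q-th quantile is by definition the (100*q)-th percentile.
assert np.quantile(a, 0.5) == np.percentile(a, 50)
assert np.array_equal(np.quantile(a, 0.5, axis=0), np.percentile(a, 50, axis=0))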
michigg/web-simultaneous-recording-tool
[ "67db83f6e34d9cb726c69b4e448fed3604a43618" ]
[ "analyser/analysis/pen_calculation_deviation_box_plots.py" ]
[ "\"\"\"\n\n\"\"\"\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import argrelextrema\n\nfrom analysis.frog_click_mean_calculation import calc_click_mean_quantil_based\nfrom utils import dataframe_index, audio_calcs\nfrom utils.data_exporter import Exporter\nfrom utils.data_loader import Loader\n\nimport logging\n\nfrom utils.output import Output\n\n# INPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas-distance_0m-device.pkl'\nINPUT_DEVICES = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Converted/devices-1-aggregated-dbas.pkl'\nOUTPUT_DIR = '/home/michigg/GIT/uni/2021-ma-michael-goetz-data/PensCalibration/Test2/Graphs/Calculations/BoxPlots'\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s]:\\n %(message)s\",\n handlers=[\n logging.FileHandler(f\"{OUTPUT_DIR}/analyse.log\", mode='w'),\n logging.StreamHandler(sys.stdout)\n ]\n)\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\n\ndef main():\n devices = Loader.load_analysis_from_pickle(INPUT_DEVICES)\n sample_rate = dataframe_index.get_sample_rate(devices)\n buffer_size = dataframe_index.get_buffer_size(devices)\n dataframes = []\n\n # global max\n result = audio_calcs.calculate_global_max(devices)\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = ['global max']\n dataframes.append(result)\n\n # quantil based deviations\n result = devices.apply(\n calc_click_mean_quantil_based,\n axis=1,\n sample_rate=sample_rate,\n buffer_size=buffer_size,\n db_only=True\n ).to_frame()\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = ['quantile based']\n dataframes.append(result)\n\n # global max based using db_range\n for db_range in [10, 15, 20]:\n result = devices.apply(\n audio_calcs.calc_series_click_mean,\n axis=1,\n sample_rate=sample_rate,\n buffer_size=buffer_size,\n db_range=db_range,\n return_maxima=False\n ).to_frame()\n result = result.unstack('PenId')\n result = result.std(axis=1).to_frame()\n result.columns = [f'{db_range} dB(A) range global max']\n dataframes.append(result)\n\n results = pd.concat(dataframes, axis=1)\n logger.info(results)\n Output.box_plot(\n '',\n # f'Deviations In dB(A) Between Frogs By Calculation Method',\n results,\n file_path=f'{OUTPUT_DIR}',\n file_name=f'box-plot-calculation-methods',\n ignore_clean=True,\n hide_outliers=True\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
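Every calculation block in main() above applies the same reduction: unstack the 'PenId' index level into columns, take the row-wise standard deviation, and rename the resulting column after the method. Below is a small self-contained pandas sketch of that pattern; the index labels and dB(A) values are invented for illustration and are not taken from the real pen-calibration data:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [['m1', 'm2'], ['pen1', 'pen2', 'pen3']],
    names=['Measurement', 'PenId'],
)
dba = pd.Series([60.1, 61.0, 59.8, 72.3, 71.9, 72.8], index=idx, name='dBA')

wide = dba.unstack('PenId')              # one column per pen
deviation = wide.std(axis=1).to_frame()  # spread between pens per measurement
deviation.columns = ['global max']       # same renaming style as the script
print(deviation)

The real script first obtains `result` from audio_calcs or the click-mean helpers; the unstack/std/rename steps afterwards are identical.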
[ [ "pandas.set_option", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
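Once each method's deviation column exists, the analyser concatenates them side by side before plotting. A toy demonstration of that pd.concat(axis=1) step with two hypothetical single-column frames (the column names echo the script, the numbers are made up):

import pandas as pd

a = pd.DataFrame({'global max': [0.4, 0.9]}, index=['m1', 'm2'])
b = pd.DataFrame({'quantile based': [0.3, 0.7]}, index=['m1', 'm2'])

results = pd.concat([a, b], axis=1)  # one column per calculation method
print(results)

The concatenated frame, one column per method aligned on the shared measurement index, is what the script hands to the project's Output.box_plot helper.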
Ali-Tahir/spaCy
[ "9e210fa7fdb8e376655e7a7ab7debd3ffd718a63" ]
[ "spacy/cli/pretrain.py" ]
[ "# coding: utf8\nfrom __future__ import print_function, unicode_literals\n\nimport plac\nimport random\nimport numpy\nimport time\nimport re\nfrom collections import Counter\nfrom pathlib import Path\nfrom thinc.v2v import Affine, Maxout\nfrom thinc.misc import LayerNorm as LN\nfrom thinc.neural.util import prefer_gpu\nfrom wasabi import Printer\nimport srsly\n\nfrom ..errors import Errors\nfrom ..tokens import Doc\nfrom ..attrs import ID, HEAD\nfrom .._ml import Tok2Vec, flatten, chain, create_default_optimizer\nfrom .._ml import masked_language_model, get_cossim_loss\nfrom .. import util\nfrom .train import _load_pretrained_tok2vec\n\n\[email protected](\n texts_loc=(\n \"Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the \"\n \"key 'tokens'\",\n \"positional\",\n None,\n str,\n ),\n vectors_model=(\"Name or path to spaCy model with vectors to learn from\"),\n output_dir=(\"Directory to write models to on each epoch\", \"positional\", None, str),\n width=(\"Width of CNN layers\", \"option\", \"cw\", int),\n depth=(\"Depth of CNN layers\", \"option\", \"cd\", int),\n cnn_window=(\"Window size for CNN layers\", \"option\", \"cW\", int),\n cnn_pieces=(\"Maxout size for CNN layers. 1 for Mish\", \"option\", \"cP\", int),\n use_chars=(\"Whether to use character-based embedding\", \"flag\", \"chr\", bool),\n sa_depth=(\"Depth of self-attention layers\", \"option\", \"sa\", int),\n bilstm_depth=(\"Depth of BiLSTM layers (requires PyTorch)\", \"option\", \"lstm\", int),\n embed_rows=(\"Number of embedding rows\", \"option\", \"er\", int),\n loss_func=(\n \"Loss function to use for the objective. Either 'L2' or 'cosine'\",\n \"option\",\n \"L\",\n str,\n ),\n use_vectors=(\"Whether to use the static vectors as input features\", \"flag\", \"uv\"),\n dropout=(\"Dropout rate\", \"option\", \"d\", float),\n batch_size=(\"Number of words per training batch\", \"option\", \"bs\", int),\n max_length=(\n \"Max words per example. Longer examples are discarded\",\n \"option\",\n \"xw\",\n int,\n ),\n min_length=(\n \"Min words per example. Shorter examples are discarded\",\n \"option\",\n \"nw\",\n int,\n ),\n seed=(\"Seed for random number generators\", \"option\", \"s\", int),\n n_iter=(\"Number of iterations to pretrain\", \"option\", \"i\", int),\n n_save_every=(\"Save model every X batches.\", \"option\", \"se\", int),\n init_tok2vec=(\n \"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.\",\n \"option\",\n \"t2v\",\n Path,\n ),\n epoch_start=(\n \"The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been \"\n \"renamed. Prevents unintended overwriting of existing weight files.\",\n \"option\",\n \"es\",\n int,\n ),\n)\ndef pretrain(\n texts_loc,\n vectors_model,\n output_dir,\n width=96,\n depth=4,\n bilstm_depth=0,\n cnn_pieces=3,\n sa_depth=0,\n use_chars=False,\n cnn_window=1,\n embed_rows=2000,\n loss_func=\"cosine\",\n use_vectors=False,\n dropout=0.2,\n n_iter=1000,\n batch_size=3000,\n max_length=500,\n min_length=5,\n seed=0,\n n_save_every=None,\n init_tok2vec=None,\n epoch_start=None,\n):\n \"\"\"\n Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,\n using an approximate language-modelling objective. Specifically, we load\n pretrained vectors, and train a component like a CNN, BiLSTM, etc to predict\n vectors which match the pretrained ones. The weights are saved to a directory\n after each epoch. 
You can then pass a path to one of these pretrained weights\n files to the 'spacy train' command.\n\n This technique may be especially helpful if you have little labelled data.\n However, it's still quite experimental, so your mileage may vary.\n\n To load the weights back in during 'spacy train', you need to ensure\n all settings are the same between pretraining and training. The API and\n errors around this need some improvement.\n \"\"\"\n config = dict(locals())\n for key in config:\n if isinstance(config[key], Path):\n config[key] = str(config[key])\n msg = Printer()\n util.fix_random_seed(seed)\n\n has_gpu = prefer_gpu()\n if has_gpu:\n import torch\n\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n msg.info(\"Using GPU\" if has_gpu else \"Not using GPU\")\n\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n msg.good(\"Created output directory\")\n srsly.write_json(output_dir / \"config.json\", config)\n msg.good(\"Saved settings to config.json\")\n\n # Load texts from file or stdin\n if texts_loc != \"-\": # reading from a file\n texts_loc = Path(texts_loc)\n if not texts_loc.exists():\n msg.fail(\"Input text file doesn't exist\", texts_loc, exits=1)\n with msg.loading(\"Loading input texts...\"):\n texts = list(srsly.read_jsonl(texts_loc))\n if not texts:\n msg.fail(\"Input file is empty\", texts_loc, exits=1)\n msg.good(\"Loaded input texts\")\n random.shuffle(texts)\n else: # reading from stdin\n msg.text(\"Reading input text from stdin...\")\n texts = srsly.read_jsonl(\"-\")\n\n with msg.loading(\"Loading model '{}'...\".format(vectors_model)):\n nlp = util.load_model(vectors_model)\n msg.good(\"Loaded model '{}'\".format(vectors_model))\n pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name\n model = create_pretraining_model(\n nlp,\n Tok2Vec(\n width,\n embed_rows,\n conv_depth=depth,\n pretrained_vectors=pretrained_vectors,\n bilstm_depth=bilstm_depth, # Requires PyTorch. Experimental.\n subword_features=not use_chars, # Set to False for Chinese etc\n cnn_maxout_pieces=cnn_pieces, # If set to 1, use Mish activation.\n ),\n )\n # Load in pretrained weights\n if init_tok2vec is not None:\n components = _load_pretrained_tok2vec(nlp, init_tok2vec)\n msg.text(\"Loaded pretrained tok2vec for: {}\".format(components))\n # Parse the epoch number from the given weight file\n model_name = re.search(r\"model\\d+\\.bin\", str(init_tok2vec))\n if model_name:\n # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'\n epoch_start = int(model_name.group(0)[5:][:-4]) + 1\n else:\n if not epoch_start:\n msg.fail(\n \"You have to use the '--epoch-start' argument when using a renamed weight file for \"\n \"'--init-tok2vec'\",\n exits=True,\n )\n elif epoch_start < 0:\n msg.fail(\n \"The argument '--epoch-start' has to be greater or equal to 0. 
'%d' is invalid\"\n % epoch_start,\n exits=True,\n )\n else:\n # Without '--init-tok2vec' the '--epoch-start' argument is ignored\n epoch_start = 0\n\n optimizer = create_default_optimizer(model.ops)\n tracker = ProgressTracker(frequency=10000)\n msg.divider(\"Pre-training tok2vec layer - starting at epoch %d\" % epoch_start)\n row_settings = {\"widths\": (3, 10, 10, 6, 4), \"aligns\": (\"r\", \"r\", \"r\", \"r\", \"r\")}\n msg.row((\"#\", \"# Words\", \"Total Loss\", \"Loss\", \"w/s\"), **row_settings)\n\n def _save_model(epoch, is_temp=False):\n is_temp_str = \".temp\" if is_temp else \"\"\n with model.use_params(optimizer.averages):\n with (output_dir / (\"model%d%s.bin\" % (epoch, is_temp_str))).open(\n \"wb\"\n ) as file_:\n file_.write(model.tok2vec.to_bytes())\n log = {\n \"nr_word\": tracker.nr_word,\n \"loss\": tracker.loss,\n \"epoch_loss\": tracker.epoch_loss,\n \"epoch\": epoch,\n }\n with (output_dir / \"log.jsonl\").open(\"a\") as file_:\n file_.write(srsly.json_dumps(log) + \"\\n\")\n\n skip_counter = 0\n for epoch in range(epoch_start, n_iter + epoch_start):\n for batch_id, batch in enumerate(\n util.minibatch_by_words(((text, None) for text in texts), size=batch_size)\n ):\n docs, count = make_docs(\n nlp,\n [text for (text, _) in batch],\n max_length=max_length,\n min_length=min_length,\n )\n skip_counter += count\n loss = make_update(\n model, docs, optimizer, objective=loss_func, drop=dropout\n )\n progress = tracker.update(epoch, loss, docs)\n if progress:\n msg.row(progress, **row_settings)\n if texts_loc == \"-\" and tracker.words_per_epoch[epoch] >= 10 ** 7:\n break\n if n_save_every and (batch_id % n_save_every == 0):\n _save_model(epoch, is_temp=True)\n _save_model(epoch)\n tracker.epoch_loss = 0.0\n if texts_loc != \"-\":\n # Reshuffle the texts if texts were loaded from a file\n random.shuffle(texts)\n if skip_counter > 0:\n msg.warn(\"Skipped {count} empty values\".format(count=str(skip_counter)))\n msg.good(\"Successfully finished pretrain\")\n\n\ndef make_update(model, docs, optimizer, drop=0.0, objective=\"L2\"):\n \"\"\"Perform an update over a single batch of documents.\n\n docs (iterable): A batch of `Doc` objects.\n drop (float): The dropout rate.\n optimizer (callable): An optimizer.\n RETURNS loss: A float for the loss.\n \"\"\"\n predictions, backprop = model.begin_update(docs, drop=drop)\n loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)\n backprop(gradients, sgd=optimizer)\n # Don't want to return a cupy object here\n # The gradients are modified in-place by the BERT MLM,\n # so we get an accurate loss\n return float(loss)\n\n\ndef make_docs(nlp, batch, min_length, max_length):\n docs = []\n skip_count = 0\n for record in batch:\n if not isinstance(record, dict):\n raise TypeError(Errors.E137.format(type=type(record), line=record))\n if \"tokens\" in record:\n words = record[\"tokens\"]\n if not words:\n skip_count += 1\n continue\n doc = Doc(nlp.vocab, words=words)\n elif \"text\" in record:\n text = record[\"text\"]\n if not text:\n skip_count += 1\n continue\n doc = nlp.make_doc(text)\n else:\n raise ValueError(Errors.E138.format(text=record))\n if \"heads\" in record:\n heads = record[\"heads\"]\n heads = numpy.asarray(heads, dtype=\"uint64\")\n heads = heads.reshape((len(doc), 1))\n doc = doc.from_array([HEAD], heads)\n if len(doc) >= min_length and len(doc) < max_length:\n docs.append(doc)\n return docs, skip_count\n\n\ndef get_vectors_loss(ops, docs, prediction, objective=\"L2\"):\n \"\"\"Compute a mean-squared 
error loss between the documents' vectors and\n the prediction.\n\n Note that this is ripe for customization! We could compute the vectors\n in some other word, e.g. with an LSTM language model, or use some other\n type of objective.\n \"\"\"\n # The simplest way to implement this would be to vstack the\n # token.vector values, but that's a bit inefficient, especially on GPU.\n # Instead we fetch the index into the vectors table for each of our tokens,\n # and look them up all at once. This prevents data copying.\n ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])\n target = docs[0].vocab.vectors.data[ids]\n if objective == \"L2\":\n d_target = prediction - target\n loss = (d_target ** 2).sum()\n elif objective == \"cosine\":\n loss, d_target = get_cossim_loss(prediction, target)\n else:\n raise ValueError(Errors.E142.format(loss_func=objective))\n return loss, d_target\n\n\ndef create_pretraining_model(nlp, tok2vec):\n \"\"\"Define a network for the pretraining. We simply add an output layer onto\n the tok2vec input model. The tok2vec input model needs to be a model that\n takes a batch of Doc objects (as a list), and returns a list of arrays.\n Each array in the output needs to have one row per token in the doc.\n \"\"\"\n output_size = nlp.vocab.vectors.data.shape[1]\n output_layer = chain(\n LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)\n )\n # This is annoying, but the parser etc have the flatten step after\n # the tok2vec. To load the weights in cleanly, we need to match\n # the shape of the models' components exactly. So what we cann\n # \"tok2vec\" has to be the same set of processes as what the components do.\n tok2vec = chain(tok2vec, flatten)\n model = chain(tok2vec, output_layer)\n model = masked_language_model(nlp.vocab, model)\n model.tok2vec = tok2vec\n model.output_layer = output_layer\n model.begin_training([nlp.make_doc(\"Give it a doc to infer shapes\")])\n return model\n\n\nclass ProgressTracker(object):\n def __init__(self, frequency=1000000):\n self.loss = 0.0\n self.prev_loss = 0.0\n self.nr_word = 0\n self.words_per_epoch = Counter()\n self.frequency = frequency\n self.last_time = time.time()\n self.last_update = 0\n self.epoch_loss = 0.0\n\n def update(self, epoch, loss, docs):\n self.loss += loss\n self.epoch_loss += loss\n words_in_batch = sum(len(doc) for doc in docs)\n self.words_per_epoch[epoch] += words_in_batch\n self.nr_word += words_in_batch\n words_since_update = self.nr_word - self.last_update\n if words_since_update >= self.frequency:\n wps = words_since_update / (time.time() - self.last_time)\n self.last_update = self.nr_word\n self.last_time = time.time()\n loss_per_word = self.loss - self.prev_loss\n status = (\n epoch,\n self.nr_word,\n _smart_round(self.loss, width=10),\n _smart_round(loss_per_word, width=6),\n int(wps),\n )\n self.prev_loss = float(self.loss)\n return status\n else:\n return None\n\n\ndef _smart_round(figure, width=10, max_decimal=4):\n \"\"\"Round large numbers as integers, smaller numbers as decimals.\"\"\"\n n_digits = len(str(int(figure)))\n n_decimal = width - (n_digits + 1)\n if n_decimal <= 1:\n return str(int(figure))\n else:\n n_decimal = min(n_decimal, max_decimal)\n format_str = \"%.\" + str(n_decimal) + \"f\"\n return format_str % figure\n" ]
[ [ "numpy.asarray", "torch.set_default_tensor_type" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
k-stacke/ssl-pathology
[ "d440ce11712a5c1b6631d698dc3cafe7c04e2786" ]
[ "simclr/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda import amp\nfrom torchvision.models import resnet50\n\nclass Identity(nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\nclass Model(nn.Module):\n def __init__(self, feature_dim=128, pretrained=False):\n super(Model, self).__init__()\n\n self.f = resnet50(pretrained=pretrained)\n self.f.fc = Identity()\n # projection head\n self.g = nn.Sequential(nn.Linear(2048, 512, bias=False),\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=True))\n\n @amp.autocast()\n def forward(self, x):\n x = self.f(x)\n feature = torch.flatten(x, start_dim=1)\n out = self.g(feature)\n return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.BatchNorm1d", "torch.cuda.amp.autocast", "torch.nn.Linear", "torch.flatten", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dina-fouad/pyccel
[ "f4d919e673b400442b9c7b81212b6fbef749c7b7", "f4d919e673b400442b9c7b81212b6fbef749c7b7", "f4d919e673b400442b9c7b81212b6fbef749c7b7", "f4d919e673b400442b9c7b81212b6fbef749c7b7" ]
[ "tests/codegen/ccode/scripts/arrays_pointers.py", "tests/codegen/fcode/scripts/complex_numbers.py", "doc/scripts/scripts/decorators_elemental.py", "tests/epyccel/modules/openmp.py" ]
[ "# pylint: disable=missing-function-docstring, missing-module-docstring/\n#==============================================================================\n\ndef allocatable_to_pointer():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n c = a #pylint:disable=unused-variable\n\ndef pointer_to_pointer():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n b = a\n c = b #pylint:disable=unused-variable\n\ndef reassign_pointers():\n\n from numpy import array\n\n a = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n b = a #pylint:disable=unused-variable\n b = a[1:]\n", "# pylint: disable=missing-function-docstring, missing-module-docstring/\nfrom numpy import array\nfrom numpy import zeros\nfrom numpy import ones\nx1 = 1+3j\nx2 = complex(1,3)\nx3 = [complex(1,1)]*10\nx4 = [1+2j]*10\nx5 = array([1+2j,2+3j])\nx6 = zeros((100,100),'complex')\nx7 = ones((100,100),'complex')\n", "from numpy import array\nfrom numpy import zeros_like\n\n@elemental\n@types(float)\ndef square(x):\n s = x*x\n return s\n\na = 2.0\nb = square(a)\nprint(b)\n\nxs = array([1., 2., 3.])\nys = zeros_like(xs)\nys = square(xs)\nprint(ys)\n", "# pylint: disable=missing-function-docstring, missing-module-docstring/\n# pylint: disable=wildcard-import\nfrom pyccel.decorators import types\n\n@types(int)\ndef set_num_threads(n):\n from pyccel.stdlib.internal.openmp import omp_set_num_threads\n omp_set_num_threads(n)\n\n@types()\ndef get_num_threads():\n from pyccel.stdlib.internal.openmp import omp_get_num_threads\n #$ omp parallel\n n = omp_get_num_threads()\n #$ omp end parallel\n return n\n\ndef get_max_threads():\n from pyccel.stdlib.internal.openmp import omp_get_max_threads\n max_threads = omp_get_max_threads()\n\n return max_threads\n\n@types('int')\ndef f1(i):\n from pyccel.stdlib.internal.openmp import omp_get_thread_num\n out = -1\n #$ omp parallel private(idx)\n idx = omp_get_thread_num()\n\n if idx == i:\n out = idx\n\n #$ omp end parallel\n return out\n\ndef directive_in_else(x : int):\n result = 0\n if x < 30:\n return x\n else:\n #$ omp parallel\n #$ omp for reduction(+:result)\n for i in range(x):\n result = result + i\n #$ omp end parallel\n\n return result\n\ndef test_omp_number_of_procs():\n from pyccel.stdlib.internal.openmp import omp_get_num_procs\n procs_num = omp_get_num_procs()\n return procs_num\n\ndef test_omp_in_parallel1():\n from pyccel.stdlib.internal.openmp import omp_in_parallel\n in_parallel = omp_in_parallel()\n return in_parallel\n\ndef test_omp_in_parallel2():\n from pyccel.stdlib.internal.openmp import omp_in_parallel\n #$ omp parallel\n in_parallel = omp_in_parallel()\n #$ omp end parallel\n return in_parallel\n\n@types ('bool')\ndef test_omp_set_get_dynamic(dynamic_theads):\n from pyccel.stdlib.internal.openmp import omp_set_dynamic, omp_get_dynamic\n omp_set_dynamic(dynamic_theads)\n return omp_get_dynamic()\n\n@types ('bool')\ndef test_omp_set_get_nested(nested):\n from pyccel.stdlib.internal.openmp import omp_set_nested, omp_get_nested\n omp_set_nested(nested)\n return omp_get_nested()\n\ndef test_omp_get_cancellation():\n from pyccel.stdlib.internal.openmp import omp_get_cancellation\n cancel_var = omp_get_cancellation()\n return cancel_var\n\ndef test_omp_get_thread_limit():\n from pyccel.stdlib.internal.openmp import omp_get_thread_limit\n #$ omp parallel\n maximum_threads_available = omp_get_thread_limit()\n #$ omp end parallel\n return maximum_threads_available\n\n@types ('int')\ndef test_omp_get_set_max_active_levels(max_active_levels):\n from 
pyccel.stdlib.internal.openmp import omp_get_max_active_levels, omp_set_max_active_levels\n omp_set_max_active_levels(max_active_levels)\n max_active_levels_var = omp_get_max_active_levels()\n return max_active_levels_var\n\ndef test_omp_get_level():\n from pyccel.stdlib.internal.openmp import omp_get_level\n #$ omp parallel\n #$ omp parallel\n nested_parallel_regions = omp_get_level()\n #$ omp end parallel\n #$ omp end parallel\n return nested_parallel_regions\n\ndef test_omp_get_active_level():\n from pyccel.stdlib.internal.openmp import omp_get_active_level\n #$ omp parallel\n #$ omp parallel\n active_level_vars = omp_get_active_level()\n #$ omp end parallel\n #$ omp end parallel\n return active_level_vars\n\ndef test_omp_get_ancestor_thread_num():\n from pyccel.stdlib.internal.openmp import omp_get_ancestor_thread_num, omp_get_active_level\n #$ omp parallel\n active_level = omp_get_active_level()\n ancestor_thread = omp_get_ancestor_thread_num(active_level)\n #$ omp end parallel\n return ancestor_thread\n\ndef test_omp_get_team_size():\n from pyccel.stdlib.internal.openmp import omp_get_team_size, omp_get_active_level\n #$ omp parallel\n active_level = omp_get_active_level()\n team_size = omp_get_team_size(active_level)\n #$ omp end parallel\n return team_size\n\ndef test_omp_in_final():\n from pyccel.stdlib.internal.openmp import omp_in_final\n x = 20\n z = 0\n result = 0\n\n #$ omp parallel\n #$ omp single\n #$ omp task final(i >= 10)\n for i in range(x):\n z = z + i\n if omp_in_final() == 1:\n result = 1\n #$ omp end task\n #$ omp end single\n #$ omp end parallel\n return result\n\ndef test_omp_get_proc_bind():\n from pyccel.stdlib.internal.openmp import omp_get_proc_bind\n\n bind_var = omp_get_proc_bind()\n return bind_var\n\n#The function give som errors\n# def test_omp_places():\n# from pyccel.stdlib.internal.openmp import omp_get_partition_num_places\n# from pyccel.stdlib.internal.openmp import omp_get_partition_place_nums\n# from pyccel.stdlib.internal.openmp import omp_get_place_num\n# from pyccel.stdlib.internal.openmp import omp_get_place_proc_ids\n# from pyccel.stdlib.internal.openmp import omp_get_place_num_procs\n# from pyccel.stdlib.internal.openmp import omp_get_num_places\n#\n# partition_num_places = omp_get_partition_num_places()\n# #partition_places_num = omp_get_partition_place_nums(0)\n# place_num = omp_get_place_num()\n# if place_num < 0:\n# return -1\n# #place_num, ids = omp_get_place_proc_ids(place_num, ids)\n# procs = omp_get_place_num_procs(place_num)\n# num_places = omp_get_num_places()\n# return place_num\n\n@types ('int')\ndef test_omp_set_get_default_device(device_num):\n from pyccel.stdlib.internal.openmp import omp_get_default_device\n from pyccel.stdlib.internal.openmp import omp_set_default_device\n omp_set_default_device(device_num)\n default_device = omp_get_default_device()\n return default_device\n\ndef test_omp_get_num_devices():\n from pyccel.stdlib.internal.openmp import omp_get_num_devices\n num_devices = omp_get_num_devices()\n return num_devices\n\ndef test_omp_get_num_teams():\n from pyccel.stdlib.internal.openmp import omp_get_num_teams\n #$ omp teams num_teams(2)\n num_teams = omp_get_num_teams()\n #$ omp end teams\n return num_teams\n\n@types('int')\ndef test_omp_get_team_num(i):\n from pyccel.stdlib.internal.openmp import omp_get_team_num\n out = -1\n #$ omp teams num_teams(2)\n team_num = omp_get_team_num()\n if team_num == i:\n out = team_num\n #$ omp end teams\n return out\n\ndef test_omp_is_initial_device():\n from 
pyccel.stdlib.internal.openmp import omp_is_initial_device\n is_task_in_init_device = omp_is_initial_device()\n return is_task_in_init_device\n\ndef test_omp_get_initial_device():\n from pyccel.stdlib.internal.openmp import omp_get_initial_device\n #$ omp target\n host_device = omp_get_initial_device()\n #$ omp end target\n return host_device\n\ndef test_omp_get_set_schedule():\n from pyccel.stdlib.internal.openmp import omp_get_schedule, omp_set_schedule\n result = 0\n #$ omp parallel private(i)\n #$ omp for schedule(runtime) reduction (+:sum)\n omp_set_schedule(2, 2)\n schedule_kind = 0\n chunk_size = 0\n omp_get_schedule(schedule_kind, chunk_size)\n for i in range(16):\n result = result + i\n #$ omp end for nowait\n return True\n\ndef test_omp_get_max_task_priority():\n from pyccel.stdlib.internal.openmp import omp_get_max_task_priority\n result = 0\n max_task_priority_var = 0\n #$ omp parallel\n #$ omp single\n #$ omp task\n max_task_priority_var = omp_get_max_task_priority()\n #$ omp end task\n #$ omp end single\n #$ omp end parallel\n return max_task_priority_var\n\n@types('real[:,:], real[:,:], real[:,:]')\ndef omp_matmul(A, x, out):\n #$ omp parallel shared(A,x,out) private(i,j,k)\n #$ omp for\n for i in range(len(A)):# pylint: disable=C0200\n for j in range(len(x[0])):# pylint: disable=C0200\n for k in range(len(x)):# pylint: disable=C0200\n out[i][j] += A[i][k] * x[k][j]\n #$ omp end parallel\n #to let the function compile using epyccel issue #468\n \"bypass issue #468\" # pylint: disable=W0105\n\n@types('real[:,:], real[:,:], real[:,:]')\ndef omp_matmul_single(A, x, out):\n from numpy import matmul\n #$ omp parallel\n #$ omp single\n out[:] = matmul(A, x)\n #$ omp end single\n #$ omp end parallel\n #to let the function compile using epyccel issue #468\n \"bypass issue #468\" # pylint: disable=W0105\n\n\n@types('int[:]', 'int[:]', 'real[:]')\ndef omp_nowait(x, y, z):\n #$ omp parallel\n #$ omp for nowait\n for i in range(0, 1000):\n y[i] = x[i] * 2\n #$ omp for nowait\n for j in range(0, 1000):\n z[j] = x[j] / 2\n #$ omp end parallel\n \"bypass issue #468\" # pylint: disable=W0105\n\n@types('int[:]')\ndef omp_arraysum(x):\n result = 0\n #$ omp parallel private(i)\n #$ omp for reduction (+:result)\n for i in range(0, 5):\n result += x[i]\n #$ omp end parallel\n return result\n\n@types('int[:]')\ndef omp_arraysum_combined(x):\n result = 0\n #$ omp parallel for reduction (+:result)\n for i in range(0, 5):\n result += x[i]\n return result\n\n@types('int')\ndef omp_range_sum_critical(x):\n result = 0\n #$ omp parallel for num_threads(4) shared(result)\n for i in range(0, x):\n #$ omp critical\n result += i\n #$ omp end critical\n return result\n\n\n@types('int[:]')\ndef omp_arraysum_single(x):\n result = 0\n #$ omp parallel\n #$ omp single\n for i in range(0, 10):\n result += x[i]\n #$ omp end single\n #$ omp end parallel\n return result\n\ndef omp_master():\n result = 30\n #$omp parallel num_threads(3) reduction(+:result)\n #$omp master\n result += 1\n #$omp end master\n #$omp end parallel\n return result\n\n@types('int')\ndef omp_taskloop(n):\n result = 0\n #$omp parallel num_threads(n)\n #$omp taskloop\n for i in range(0, 10): # pylint: disable=unused-variable\n #$omp atomic\n result = result + 1\n #$omp end parallel\n return result\n\n@types('int')\ndef omp_tasks(x):\n @types('int', results='int')\n def fib(n):\n if n < 2:\n return n\n #$ omp task shared(i) firstprivate(n)\n i = fib(n-1)\n #$ omp end task\n #$ omp task shared(j) firstprivate(n)\n j = fib(n-2)\n #$ omp end task\n 
#$ omp taskwait\n return i + j\n #$ omp parallel\n #$ omp single\n m = fib(x)\n #$ omp end single\n #$ omp end parallel\n return m\n\n@types('int')\ndef omp_simd(n):\n from numpy import zeros\n result = 0\n arr = zeros(n, dtype=int)\n #$ omp parallel num_threads(4)\n #$ omp simd\n for i in range(0, n):\n arr[i] = i\n #$ omp end parallel\n for i in range(0, n):\n result = result + arr[i]\n return result\n\ndef omp_flush():\n from pyccel.stdlib.internal.openmp import omp_get_thread_num\n flag = 0\n #$ omp parallel num_threads(2)\n if omp_get_thread_num() == 0:\n #$ omp atomic update\n flag = flag + 1\n elif omp_get_thread_num() == 1:\n #$ omp flush(flag)\n while flag < 1:\n pass\n #$ omp flush(flag)\n #$ omp atomic update\n flag = flag + 1\n #$ omp end parallel\n return flag\n\ndef omp_barrier():\n from numpy import zeros\n arr = zeros(1000, dtype=int)\n result = 0\n #$ omp parallel num_threads(3)\n #$ omp for\n for i in range(0, 1000):\n arr[i] = i * 2\n\n #$ omp barrier\n #$ omp for reduction(+:result)\n for i in range(0, 1000):\n result = result + arr[i]\n #$ omp end parallel\n return result\n\ndef combined_for_simd():\n import numpy as np\n x = np.array([1,2,1,2,1,2,1,2])\n y = np.array([2,1,2,1,2,1,2,1])\n z = np.zeros(8, dtype = int)\n result = 0\n #$ omp parallel for simd\n for i in range(0, 8):\n z[i] = x[i] + y[i]\n\n for i in range(0, 8):\n result = result + z[i]\n return result\n\ndef omp_sections():\n n = 8\n sum1 = 0\n sum2 = 0\n sum3 = 0\n #$ omp parallel num_threads(2)\n #$ omp sections\n\n #$ omp section\n for i in range(0, int(n/3)):\n sum1 = sum1 + i\n #$ omp end section\n\n #$ omp section\n for i in range(0, int(n/2)):\n sum2 = sum2 + i\n #$ omp end section\n\n #$ omp section\n for i in range(0, n):\n sum3 = sum3 + i\n #$ omp end section\n #$ omp end sections\n\n #$ omp end parallel\n\n return (sum1 + sum2 + sum3)\n" ]
[ [ "numpy.array" ], [ "numpy.array", "numpy.zeros", "numpy.ones" ], [ "numpy.array", "numpy.zeros_like" ], [ "numpy.array", "numpy.matmul", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MaxCodeXTC/symmetrynet
[ "f42810be95ecaa85a32a836213cb8d0687184574" ]
[ "train.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Training and Evaluate the Neural Network\nUsage:\n train.py [options] <yaml-config>\n train.py (-h | --help )\n\nArguments:\n yaml-config Path to the yaml hyper-parameter file\n\nOptions:\n -h --help Show this screen.\n -d --devices <devices> Comma seperated GPU devices [default: 0]\n -i --identifier <identifier> Folder name [default: default-identifier]\n --from <checkpoint> Path to a checkpoint\n --ignore-optim Ignore optim when restoring from a checkpoint\n\"\"\"\n\nimport datetime\nimport glob\nimport os\nimport os.path as osp\nimport platform\nimport pprint\nimport random\nimport shlex\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport threading\n\nimport numpy as np\nimport torch\nimport yaml\nfrom docopt import docopt\n\nimport sym\nfrom sym.config import CI, CM, CO, C, load_config\nfrom sym.datasets import ShapeNetDataset\n\n\ndef git_hash():\n cmd = 'git log -n 1 --pretty=\"%h\"'\n ret = subprocess.check_output(shlex.split(cmd)).strip()\n if isinstance(ret, bytes):\n ret = ret.decode()\n return ret\n\n\ndef get_outdir(identifier):\n # load config\n name = str(datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\"))\n name += \"-%s\" % git_hash()\n name += \"-%s\" % identifier\n outdir = osp.join(osp.expanduser(CI.logdir), name)\n if not osp.exists(outdir):\n os.makedirs(outdir)\n C.to_yaml(osp.join(outdir, \"config.yaml\"))\n os.system(f\"git diff HEAD > {outdir}/gitdiff.patch\")\n os.system(f\"find -name '*.py' -print0 | tar -cJf {outdir}/src.tar.xz --null -T -\")\n return outdir\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main():\n args = docopt(__doc__)\n config_file = args[\"<yaml-config>\"] or \"config/shapenet.yaml\"\n C.update(C.from_yaml(filename=config_file))\n if args[\"--from\"]:\n C.io.resume_from = args[\"--from\"]\n CI.update(C.io)\n CM.update(C.model)\n CO.update(C.optim)\n pprint.pprint(C, indent=4)\n resume_from = CI.resume_from\n\n # WARNING: still not deterministic\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n\n device_name = \"cpu\"\n num_gpus = args[\"--devices\"].count(\",\") + 1\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args[\"--devices\"]\n if torch.cuda.is_available():\n device_name = \"cuda\"\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = True\n torch.cuda.manual_seed(0)\n print(\"Let's use\", torch.cuda.device_count(), \"GPU(s)!\")\n else:\n print(\"CUDA is not available\")\n device = torch.device(device_name)\n\n # 1. dataset\n batch_size = CM.batch_size * num_gpus\n datadir = CI.datadir\n kwargs = {\n \"batch_size\": batch_size,\n \"num_workers\": CI.num_workers,\n \"pin_memory\": True,\n }\n if CI.dataset == \"ShapeNet\":\n Dataset = ShapeNetDataset\n else:\n raise NotImplementedError\n\n train_loader = torch.utils.data.DataLoader(\n Dataset(datadir, split=\"train\"), shuffle=True, **kwargs\n )\n val_loader = torch.utils.data.DataLoader(\n Dataset(datadir, split=\"valid\"), shuffle=False, **kwargs\n )\n\n if resume_from:\n print(\"Restoring from\", resume_from)\n checkpoint = torch.load(resume_from)\n\n # 2. 
model\n model = sym.models.SymmetryNet().to(device)\n print(\"# of params:\", count_parameters(model))\n model = sym.utils.MyDataParallel(\n model, device_ids=list(range(args[\"--devices\"].count(\",\") + 1))\n )\n if resume_from:\n for module_name in list(checkpoint[\"model_state_dict\"].keys()):\n if module_name.startswith(\"module.backbone.volume_network.fc\"):\n del checkpoint[\"model_state_dict\"][module_name]\n model.load_state_dict(checkpoint[\"model_state_dict\"], strict=False)\n\n # 3. optimizer\n if CO.name == \"Adam\":\n optim = torch.optim.Adam(model.parameters(), **CO.params)\n elif CO.name == \"SGD\":\n optim = torch.optim.SGD(model.parameters(), **CO.params)\n else:\n raise NotImplementedError\n\n if resume_from and not args[\"--ignore-optim\"]:\n optim.load_state_dict(checkpoint[\"optim_state_dict\"])\n outdir = get_outdir(args[\"--identifier\"])\n shutil.copyfile(config_file, osp.join(outdir, \"config_origin.yaml\"))\n print(\"outdir:\", outdir)\n\n try:\n trainer = sym.trainer.Trainer(\n device=device,\n model=model,\n optimizer=optim,\n train_loader=train_loader,\n val_loader=val_loader,\n batch_size=batch_size,\n out=outdir,\n )\n trainer.train()\n except BaseException:\n if len(glob.glob(f\"{outdir}/viz/*\")) <= 1:\n shutil.rmtree(outdir)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.cuda.manual_seed", "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
williamscales/pytopocomplexity
[ "f739b7695066f5da40a9610d21579983a12e76ad" ]
[ "tests/test_entropy.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for `pytopocomplexity.entropy`\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import (ascii, bytes, chr, dict, filter, hex, input, int,\n map, next, oct, open, pow, range, round, str,\n super, zip)\n\nimport numpy as np\nfrom numpy.random import random_sample\n\nfrom pytopocomplexity.entropy import estimate_entropy\n\n\ndef test_entropy_is_zero_for_unimodal_function():\n \"\"\"Test that the entropy of a function with one extremum is zero.\"\"\"\n def func_one_min(x):\n \"\"\"Objective function with global minimum at ``x == 0``.\"\"\"\n return x**2\n #initial_models = 2*random_sample((100,100)) - 1\n initial_models = 2*random_sample(100) - 1\n entropy = estimate_entropy(func_one_min, initial_models, 1e-8, 1e5)\n assert entropy == 0\n" ]
[ [ "numpy.random.random_sample" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jkomyno/lattice-submodular-maximization
[ "e03c8bcc5fcf5bf79a6ae81f145757cf3fdff7cb" ]
[ "python/benchmark/utils/powerset.py" ]
[ "import numpy as np\nimport itertools\nfrom nptyping import NDArray\nfrom typing import Iterator\nfrom ..objective import Objective\n\n\ndef powerset(f: Objective) -> Iterator[NDArray[int]]:\n \"\"\"\n Inumerate b^n possible vectors in the integer lattice.\n :param f: integer-lattice submodular function objective\n \"\"\"\n return map(lambda t: np.array([*t]),\n itertools.product(range(f.b + 1), repeat=f.n))\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JingqingZ/tensorlayer2
[ "289a0402bd64f6a423aa574f10ac8ad8efcb7b66", "289a0402bd64f6a423aa574f10ac8ad8efcb7b66", "289a0402bd64f6a423aa574f10ac8ad8efcb7b66" ]
[ "examples/basic_tutorials/tutorial_cifar10_placeholder.py", "tests/layers/test_layers_merge.py", "tests/layers/test_layers_lambda.py" ]
[ "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\n\ntf.logging.set_verbosity(tf.logging.DEBUG)\ntl.logging.set_verbosity(tl.logging.DEBUG)\n\nsess = tf.InteractiveSession()\n\nX_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)\n\n\ndef model(x, y_, reuse):\n W_init = tf.truncated_normal_initializer(stddev=5e-2)\n W_init2 = tf.truncated_normal_initializer(stddev=0.04)\n b_init2 = tf.constant_initializer(value=0.1)\n with tf.variable_scope(\"model\", reuse=reuse):\n net = InputLayer(x, name='input')\n net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')\n\n net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')\n net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')\n\n net = FlattenLayer(net, name='flatten')\n net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')\n net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')\n net = DenseLayer(net, 10, act=None, W_init=W_init2, name='output')\n y = net.outputs\n\n ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n L2 = 0\n for p in tl.layers.get_variables_with_name('relu/W', True, True):\n L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n cost = ce + L2\n\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return net, cost, acc\n\n\ndef model_batch_norm(x, y_, reuse, is_train):\n \"\"\"Batch normalization should be placed before rectifier.\"\"\"\n W_init = tf.truncated_normal_initializer(stddev=5e-2)\n W_init2 = tf.truncated_normal_initializer(stddev=0.04)\n b_init2 = tf.constant_initializer(value=0.1)\n with tf.variable_scope(\"model\", reuse=reuse):\n net = InputLayer(x, name='input')\n net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')\n net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n\n net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')\n net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')\n net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')\n\n net = FlattenLayer(net, name='flatten') # output: (batch_size, 2304)\n net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')\n net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')\n net = DenseLayer(net, 10, act=None, W_init=W_init2, name='output')\n y = net.outputs\n\n ce = tl.cost.cross_entropy(y, y_, name='cost')\n # L2 for the MLP, without this, the accuracy will be reduced by 15%.\n L2 = 0\n for p in tl.layers.get_variables_with_name('relu/W', True, True):\n L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n cost = ce + L2\n\n correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, 
tf.float32))\n\n return net, cost, acc\n\n\ndef distort_fn(x, is_train=False):\n \"\"\"\n The images are processed as follows:\n .. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.\n .. They are approximately whitened to make the model insensitive to dynamic range.\n For training, we additionally apply a series of random distortions to\n artificially increase the data set size:\n .. Randomly flip the image from left to right.\n .. Randomly distort the image brightness.\n \"\"\"\n # print('begin',x.shape, np.min(x), np.max(x))\n x = tl.prepro.crop(x, 24, 24, is_random=is_train)\n # print('after crop',x.shape, np.min(x), np.max(x))\n if is_train:\n # x = tl.prepro.zoom(x, zoom_range=(0.9, 1.0), is_random=True)\n # print('after zoom', x.shape, np.min(x), np.max(x))\n x = tl.prepro.flip_axis(x, axis=1, is_random=True)\n # print('after flip',x.shape, np.min(x), np.max(x))\n x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)\n # print('after brightness',x.shape, np.min(x), np.max(x))\n # tmp = np.max(x)\n # x += np.random.uniform(-20, 20)\n # x /= tmp\n # normalize the image\n x = (x - np.mean(x)) / max(np.std(x), 1e-5) # avoid values divided by 0\n # print('after norm', x.shape, np.min(x), np.max(x), np.mean(x))\n return x\n\n\nx = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')\ny_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')\n\n# using local response normalization\n# network, cost, _ = model(x, y_, False)\n# _, cost_test, acc = model(x, y_, True)\n# you may want to try batch normalization\nnetwork, cost, _ = model_batch_norm(x, y_, False, is_train=True)\n_, cost_test, acc = model_batch_norm(x, y_, True, is_train=False)\n\n# train\nn_epoch = 50000\nlearning_rate = 0.0001\nprint_freq = 1\nbatch_size = 128\n\ntrain_params = network.all_params\ntrain_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,\n use_locking=False).minimize(cost, var_list=train_params)\n\nsess.run(tf.global_variables_initializer())\n\nnetwork.print_params(False)\nnetwork.print_layers()\n\nprint(' learning_rate: %f' % learning_rate)\nprint(' batch_size: %d' % batch_size)\n\nfor epoch in range(n_epoch):\n start_time = time.time()\n for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):\n X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=True) # data augmentation for training\n sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})\n\n if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:\n print(\"Epoch %d of %d took %fs\" % (epoch + 1, n_epoch, time.time() - start_time))\n # train_loss, train_acc, n_batch = 0, 0, 0\n # for X_train_a, y_train_a in tl.iterate.minibatches(\n # X_train, y_train, batch_size, shuffle=True):\n # X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=False) # central crop\n # err, ac = sess.run([cost_test, acc], feed_dict={x: X_train_a, y_: y_train_a})\n # train_loss += err; train_acc += ac; n_batch += 1\n # print(\" train loss: %f\" % (train_loss/ n_batch))\n # print(\" train acc: %f\" % (train_acc/ n_batch))\n test_loss, test_acc, n_batch = 0, 0, 0\n for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):\n X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False) # central crop\n err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})\n test_loss += err\n test_acc += ac\n n_batch += 1\n print(\" test loss: %f\" % (test_loss / 
n_batch))\n print(\" test acc: %f\" % (test_acc / n_batch))\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\n\nfrom tests.utils import CustomTestCase\n\n\nclass Layer_Merge_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n pass\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def test_concat(self):\n\n class CustomModel(tl.models.Model):\n def __init__(self):\n super(CustomModel, self).__init__(name=\"custom\")\n self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')\n self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')\n self.concat = tl.layers.Concat(concat_dim=1, name='concat_layer')\n\n def forward(self, inputs):\n d1 = self.dense1(inputs)\n d2 = self.dense2(inputs)\n outputs = self.concat([d1, d2])\n return outputs\n\n model = CustomModel()\n model.train()\n inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))\n outputs = model(inputs)\n print(model)\n\n self.assertEqual(outputs.get_shape().as_list(), [4, 20])\n\n def test_elementwise(self):\n class CustomModel(tl.models.Model):\n def __init__(self):\n super(CustomModel, self).__init__(name=\"custom\")\n self.dense1 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu1_1')\n self.dense2 = tl.layers.Dense(in_channels=20, n_units=10, act=tf.nn.relu, name='relu2_1')\n self.element = tl.layers.Elementwise(combine_fn=tf.minimum, name='minimum', act=tf.identity)\n\n def forward(self, inputs):\n d1 = self.dense1(inputs)\n d2 = self.dense2(inputs)\n outputs = self.element([d1, d2])\n return outputs, d1, d2\n\n model = CustomModel()\n model.train()\n inputs = tf.convert_to_tensor(np.random.random([4, 20]).astype(np.float32))\n outputs, d1, d2 = model(inputs)\n print(model)\n\n min = tf.minimum(d1, d2)\n self.assertEqual(outputs.get_shape().as_list(), [4, 10])\n self.assertTrue(np.array_equal(min.numpy(), outputs.numpy()))\n\nif __name__ == '__main__':\n\n unittest.main()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\nimport numpy as np\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\nimport tensorlayer as tl\n\nfrom tests.utils import CustomTestCase\n\n\nclass Layer_Lambda_Test(CustomTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.data_x = np.random.random([100, 1]).astype(np.float32)\n cls.data_y = cls.data_x**3 + np.random.random() * cls.data_x**2 + np.random.random() * cls.data_x\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def test_lambda_keras(self):\n layers = [\n tf.keras.layers.Dense(10, activation=tf.nn.relu),\n tf.keras.layers.Dense(5, activation=tf.nn.sigmoid),\n tf.keras.layers.Dense(1, activation=tf.identity)\n ]\n perceptron = tf.keras.Sequential(layers)\n # in order to get trainable_variables of keras\n _ = perceptron(np.random.random([100, 5]).astype(np.float32))\n\n class CustomizeModel(tl.models.Model):\n def __init__(self):\n super(CustomizeModel, self).__init__()\n self.dense = tl.layers.Dense(in_channels=1, n_units=5)\n self.lambdalayer = tl.layers.Lambda(perceptron, perceptron.trainable_variables)\n\n def forward(self, x):\n z = self.dense(x)\n z = self.lambdalayer(z)\n return z\n\n optimizer = tf.optimizers.Adam(learning_rate=0.1)\n\n model = CustomizeModel()\n print(model.lambdalayer)\n\n model.train()\n\n for epoch in range(10):\n with tf.GradientTape() as 
tape:\n pred_y = model(self.data_x)\n loss = tl.cost.mean_squared_error(pred_y, self.data_y)\n\n gradients = tape.gradient(loss, model.weights)\n optimizer.apply_gradients(zip(gradients, model.weights))\n\n print(\"epoch %d, loss %f\" % (epoch, loss))\n\n def test_lambda_func_with_args(self):\n def customize_func(x, foo=42):\n if foo == 0:\n return tf.nn.relu(x)\n elif foo == 1:\n return tf.nn.sigmoid(x)\n else:\n return tf.identity(x)\n\n class CustomizeModel(tl.models.Model):\n def __init__(self):\n super(CustomizeModel, self).__init__()\n self.dense = tl.layers.Dense(in_channels=1, n_units=5)\n self.lambdalayer = tl.layers.Lambda(customize_func, fn_weights=[], fn_args={'foo': 0})\n\n def forward(self, x, bar):\n z = self.dense(x)\n if bar == -1:\n zf = self.lambdalayer(z)\n else:\n zf = self.lambdalayer(z, foo=bar)\n return z, zf\n\n model = CustomizeModel()\n print(model.lambdalayer)\n model.train()\n\n out, out2 = model(self.data_x, bar=-1)\n self.assertTrue(np.array_equal(out2.numpy(), tf.nn.relu(out).numpy()))\n out, out2 = model(self.data_x, bar=0)\n self.assertTrue(np.array_equal(out2.numpy(), tf.nn.relu(out).numpy()))\n out, out2 = model(self.data_x, bar=1)\n self.assertTrue(np.array_equal(out2.numpy(), tf.nn.sigmoid(out).numpy()))\n out, out2 = model(self.data_x, bar=2)\n self.assertTrue(np.array_equal(out2.numpy(), out.numpy()))\n\n def test_lambda_func_without_args(self):\n\n class CustomizeModel(tl.models.Model):\n def __init__(self):\n super(CustomizeModel, self).__init__()\n self.dense = tl.layers.Dense(in_channels=1, n_units=5)\n self.lambdalayer = tl.layers.Lambda(lambda x: 2*x, fn_weights=[])\n\n def forward(self, x):\n z = self.dense(x)\n zf = self.lambdalayer(z)\n return z, zf\n\n model = CustomizeModel()\n print(model.lambdalayer)\n model.train()\n\n out, out2 = model(self.data_x)\n self.assertTrue(np.array_equal(out2.numpy(), out.numpy()*2))\n\n def test_elementwiselambda_func_with_args(self):\n\n def customize_func(noise, mean, std, foo=42):\n return mean + noise * tf.exp(std * 0.5) + foo\n\n class CustomizeModel(tl.models.Model):\n def __init__(self):\n super(CustomizeModel, self).__init__()\n self.dense1 = tl.layers.Dense(in_channels=1, n_units=5)\n self.dense2 = tl.layers.Dense(in_channels=1, n_units=5)\n self.dense3 = tl.layers.Dense(in_channels=1, n_units=5)\n self.lambdalayer = tl.layers.ElementwiseLambda(customize_func, fn_weights=[], fn_args={'foo': 1024})\n\n def forward(self, x, bar=None):\n noise = self.dense1(x)\n mean = self.dense2(x)\n std = self.dense3(x)\n if bar is None:\n out = self.lambdalayer([noise, mean, std])\n else:\n out = self.lambdalayer([noise, mean, std], foo=bar)\n return noise, mean, std, out\n\n model = CustomizeModel()\n print(model.lambdalayer)\n model.train()\n\n noise, mean, std, out = model(self.data_x)\n self.assertTrue(np.allclose(out.numpy(), customize_func(noise, mean, std, foo=1024).numpy()))\n noise, mean, std, out = model(self.data_x, bar=2048)\n self.assertTrue(np.allclose(out.numpy(), customize_func(noise, mean, std, foo=2048).numpy()))\n\n def test_elementwiselambda_func_without_args(self):\n\n def customize_func(noise, mean, std):\n return mean + noise * tf.exp(std * 0.5)\n\n class CustomizeModel(tl.models.Model):\n def __init__(self):\n super(CustomizeModel, self).__init__()\n self.dense1 = tl.layers.Dense(in_channels=1, n_units=5)\n self.dense2 = tl.layers.Dense(in_channels=1, n_units=5)\n self.dense3 = tl.layers.Dense(in_channels=1, n_units=5)\n self.lambdalayer = tl.layers.ElementwiseLambda(customize_func, 
fn_weights=[])\n\n def forward(self, x):\n noise = self.dense1(x)\n mean = self.dense2(x)\n std = self.dense3(x)\n out = self.lambdalayer([noise, mean, std])\n return noise, mean, std, out\n\n model = CustomizeModel()\n print(model.lambdalayer)\n model.train()\n\n noise, mean, std, out = model(self.data_x)\n self.assertTrue(np.array_equal(out.numpy(), customize_func(noise, mean, std).numpy()))\n\n\nif __name__ == '__main__':\n\n unittest.main()\n" ]
[ [ "tensorflow.InteractiveSession", "tensorflow.cast", "tensorflow.truncated_normal_initializer", "tensorflow.placeholder", "tensorflow.constant_initializer", "tensorflow.global_variables_initializer", "numpy.std", "numpy.mean", "tensorflow.logging.set_verbosity", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope", "tensorflow.argmax" ], [ "tensorflow.minimum", "numpy.random.random" ], [ "tensorflow.nn.relu", "numpy.random.random", "tensorflow.nn.sigmoid", "tensorflow.keras.layers.Dense", "tensorflow.keras.Sequential", "tensorflow.identity", "tensorflow.exp", "tensorflow.optimizers.Adam", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
hotchilianalytics/zipline-broker
[ "fb475cf89ec8886db4ee6420bd9aca70c1821eab" ]
[ "tests/test_finance.py" ]
[ "#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for the zipline.finance package\n\"\"\"\nfrom datetime import datetime, timedelta\nimport os\n\nfrom nose.tools import timed\nfrom nose.tools import nottest\n\nimport numpy as np\nimport pandas as pd\nimport pytz\nfrom six import iteritems\nfrom six.moves import range\nfrom testfixtures import TempDirectory\n\nfrom zipline.finance.blotter.simulation_blotter import SimulationBlotter\nfrom zipline.finance.execution import MarketOrder, LimitOrder\nfrom zipline.finance.metrics import MetricsTracker, load as load_metrics_set\nfrom zipline.finance.trading import SimulationParameters\nfrom zipline.data.us_equity_pricing import BcolzDailyBarReader\nfrom zipline.data.minute_bars import BcolzMinuteBarReader\nfrom zipline.data.data_portal import DataPortal\nfrom zipline.data.us_equity_pricing import BcolzDailyBarWriter\nfrom zipline.finance.slippage import FixedSlippage, FixedBasisPointsSlippage\nfrom zipline.finance.asset_restrictions import NoRestrictions\nfrom zipline.protocol import BarData\nfrom zipline.testing import write_bcolz_minute_data\nimport zipline.testing.fixtures as zf\nimport zipline.utils.factory as factory\n\nDEFAULT_TIMEOUT = 15 # seconds\nEXTENDED_TIMEOUT = 90\n\n_multiprocess_can_split_ = False\n\n\nclass FinanceTestCase(zf.WithAssetFinder,\n zf.WithTradingCalendars,\n zf.ZiplineTestCase):\n ASSET_FINDER_EQUITY_SIDS = 1, 2, 133\n start = START_DATE = pd.Timestamp('2006-01-01', tz='utc')\n end = END_DATE = pd.Timestamp('2006-12-31', tz='utc')\n\n def init_instance_fixtures(self):\n super(FinanceTestCase, self).init_instance_fixtures()\n self.zipline_test_config = {'sid': 133}\n\n # TODO: write tests for short sales\n # TODO: write a test to do massive buying or shorting.\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_partially_filled_orders(self):\n\n # create a scenario where order size and trade size are equal\n # so that orders must be spread out over several trades.\n params = {\n 'trade_count': 360,\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 2,\n 'order_amount': 100,\n 'order_interval': timedelta(minutes=1),\n # because we placed two orders for 100 shares each, and the volume\n # of each trade is 100, and by default you can take up 10% of the\n # bar's volume (per FixedBasisPointsSlippage, the default slippage\n # model), the simulator should spread the order into 20 trades of\n # 10 shares per order.\n 'expected_txn_count': 20,\n 'expected_txn_volume': 2 * 100,\n 'default_slippage': True\n }\n\n self.transaction_sim(**params)\n\n # same scenario, but with short sales\n params2 = {\n 'trade_count': 360,\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 2,\n 'order_amount': -100,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 20,\n 'expected_txn_volume': 2 * -100,\n 'default_slippage': True\n }\n\n self.transaction_sim(**params2)\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_collapsing_orders(self):\n # create a scenario 
where order.amount <<< trade.volume\n # to test that several orders can be covered properly by one trade,\n # but are represented by multiple transactions.\n params1 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(hours=1),\n 'order_count': 24,\n 'order_amount': 1,\n 'order_interval': timedelta(minutes=1),\n # because we placed an orders totaling less than 25% of one trade\n # the simulator should produce just one transaction.\n 'expected_txn_count': 24,\n 'expected_txn_volume': 24\n }\n self.transaction_sim(**params1)\n\n # second verse, same as the first. except short!\n params2 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(hours=1),\n 'order_count': 24,\n 'order_amount': -1,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 24,\n 'expected_txn_volume': -24\n }\n self.transaction_sim(**params2)\n\n # Runs the collapsed trades over daily trade intervals.\n # Ensuring that our delay works for daily intervals as well.\n params3 = {\n 'trade_count': 6,\n 'trade_interval': timedelta(days=1),\n 'order_count': 24,\n 'order_amount': 1,\n 'order_interval': timedelta(minutes=1),\n 'expected_txn_count': 24,\n 'expected_txn_volume': 24\n }\n self.transaction_sim(**params3)\n\n @timed(DEFAULT_TIMEOUT)\n @nottest\n def test_alternating_long_short(self):\n # create a scenario where we alternate buys and sells\n params1 = {\n 'trade_count': int(6.5 * 60 * 4),\n 'trade_interval': timedelta(minutes=1),\n 'order_count': 4,\n 'order_amount': 10,\n 'order_interval': timedelta(hours=24),\n 'alternate': True,\n 'complete_fill': True,\n 'expected_txn_count': 4,\n 'expected_txn_volume': 0 # equal buys and sells\n }\n self.transaction_sim(**params1)\n\n def transaction_sim(self, **params):\n \"\"\"This is a utility method that asserts expected\n results for conversion of orders to transactions given a\n trade history\n \"\"\"\n trade_count = params['trade_count']\n trade_interval = params['trade_interval']\n order_count = params['order_count']\n order_amount = params['order_amount']\n order_interval = params['order_interval']\n expected_txn_count = params['expected_txn_count']\n expected_txn_volume = params['expected_txn_volume']\n\n # optional parameters\n # ---------------------\n # if present, alternate between long and short sales\n alternate = params.get('alternate')\n\n # if present, expect transaction amounts to match orders exactly.\n complete_fill = params.get('complete_fill')\n\n asset1 = self.asset_finder.retrieve_asset(1)\n with TempDirectory() as tempdir:\n\n if trade_interval < timedelta(days=1):\n sim_params = factory.create_simulation_parameters(\n start=self.start,\n end=self.end,\n data_frequency=\"minute\"\n )\n\n minutes = self.trading_calendar.minutes_window(\n sim_params.first_open,\n int((trade_interval.total_seconds() / 60) * trade_count)\n + 100)\n\n price_data = np.array([10.1] * len(minutes))\n assets = {\n asset1.sid: pd.DataFrame({\n \"open\": price_data,\n \"high\": price_data,\n \"low\": price_data,\n \"close\": price_data,\n \"volume\": np.array([100] * len(minutes)),\n \"dt\": minutes\n }).set_index(\"dt\")\n }\n\n write_bcolz_minute_data(\n self.trading_calendar,\n self.trading_calendar.sessions_in_range(\n self.trading_calendar.minute_to_session_label(\n minutes[0]\n ),\n self.trading_calendar.minute_to_session_label(\n minutes[-1]\n )\n ),\n tempdir.path,\n iteritems(assets),\n )\n\n equity_minute_reader = BcolzMinuteBarReader(tempdir.path)\n\n data_portal = DataPortal(\n self.asset_finder, self.trading_calendar,\n 
first_trading_day=equity_minute_reader.first_trading_day,\n equity_minute_reader=equity_minute_reader,\n )\n else:\n sim_params = factory.create_simulation_parameters(\n data_frequency=\"daily\"\n )\n\n days = sim_params.sessions\n\n assets = {\n 1: pd.DataFrame({\n \"open\": [10.1] * len(days),\n \"high\": [10.1] * len(days),\n \"low\": [10.1] * len(days),\n \"close\": [10.1] * len(days),\n \"volume\": [100] * len(days),\n \"day\": [day.value for day in days]\n }, index=days)\n }\n\n path = os.path.join(tempdir.path, \"testdata.bcolz\")\n BcolzDailyBarWriter(path, self.trading_calendar, days[0],\n days[-1]).write(\n assets.items()\n )\n\n equity_daily_reader = BcolzDailyBarReader(path)\n\n data_portal = DataPortal(\n self.asset_finder, self.trading_calendar,\n first_trading_day=equity_daily_reader.first_trading_day,\n equity_daily_reader=equity_daily_reader,\n )\n\n if \"default_slippage\" not in params or \\\n not params[\"default_slippage\"]:\n slippage_func = FixedBasisPointsSlippage()\n else:\n slippage_func = None\n\n blotter = SimulationBlotter(slippage_func)\n\n start_date = sim_params.first_open\n\n if alternate:\n alternator = -1\n else:\n alternator = 1\n\n tracker = MetricsTracker(\n trading_calendar=self.trading_calendar,\n first_session=sim_params.start_session,\n last_session=sim_params.end_session,\n capital_base=sim_params.capital_base,\n emission_rate=sim_params.emission_rate,\n data_frequency=sim_params.data_frequency,\n asset_finder=self.asset_finder,\n metrics=load_metrics_set('none'),\n )\n\n # replicate what tradesim does by going through every minute or day\n # of the simulation and processing open orders each time\n if sim_params.data_frequency == \"minute\":\n ticks = minutes\n else:\n ticks = days\n\n transactions = []\n\n order_list = []\n order_date = start_date\n for tick in ticks:\n blotter.current_dt = tick\n if tick >= order_date and len(order_list) < order_count:\n # place an order\n direction = alternator ** len(order_list)\n order_id = blotter.order(\n asset1,\n order_amount * direction,\n MarketOrder(),\n )\n order_list.append(blotter.orders[order_id])\n order_date = order_date + order_interval\n # move after market orders to just after market next\n # market open.\n if order_date.hour >= 21:\n if order_date.minute >= 00:\n order_date = order_date + timedelta(days=1)\n order_date = order_date.replace(hour=14, minute=30)\n else:\n bar_data = BarData(\n data_portal=data_portal,\n simulation_dt_func=lambda: tick,\n data_frequency=sim_params.data_frequency,\n trading_calendar=self.trading_calendar,\n restrictions=NoRestrictions(),\n )\n txns, _, closed_orders = blotter.get_transactions(bar_data)\n for txn in txns:\n tracker.process_transaction(txn)\n transactions.append(txn)\n\n blotter.prune_orders(closed_orders)\n\n for i in range(order_count):\n order = order_list[i]\n self.assertEqual(order.asset, asset1)\n self.assertEqual(order.amount, order_amount * alternator ** i)\n\n if complete_fill:\n self.assertEqual(len(transactions), len(order_list))\n\n total_volume = 0\n for i in range(len(transactions)):\n txn = transactions[i]\n total_volume += txn.amount\n if complete_fill:\n order = order_list[i]\n self.assertEqual(order.amount, txn.amount)\n\n self.assertEqual(total_volume, expected_txn_volume)\n\n self.assertEqual(len(transactions), expected_txn_count)\n\n if total_volume == 0:\n self.assertRaises(KeyError, lambda: tracker.positions[asset1])\n else:\n cumulative_pos = tracker.positions[asset1]\n self.assertEqual(total_volume, 
cumulative_pos.amount)\n\n # the open orders should not contain the asset.\n oo = blotter.open_orders\n self.assertNotIn(\n asset1,\n oo,\n \"Entry is removed when no open orders\"\n )\n\n def test_blotter_processes_splits(self):\n blotter = SimulationBlotter(equity_slippage=FixedSlippage())\n\n # set up two open limit orders with very low limit prices,\n # one for sid 1 and one for sid 2\n asset1 = self.asset_finder.retrieve_asset(1)\n asset2 = self.asset_finder.retrieve_asset(2)\n asset133 = self.asset_finder.retrieve_asset(133)\n\n blotter.order(asset1, 100, LimitOrder(10, asset=asset1))\n blotter.order(asset2, 100, LimitOrder(10, asset=asset2))\n\n # send in splits for assets 133 and 2. We have no open orders for\n # asset 133 so it should be ignored.\n blotter.process_splits([(asset133, 0.5), (asset2, 0.3333)])\n\n for asset in [asset1, asset2]:\n order_lists = blotter.open_orders[asset]\n self.assertIsNotNone(order_lists)\n self.assertEqual(1, len(order_lists))\n\n asset1_order = blotter.open_orders[1][0]\n asset2_order = blotter.open_orders[2][0]\n\n # make sure the asset1 order didn't change\n self.assertEqual(100, asset1_order.amount)\n self.assertEqual(10, asset1_order.limit)\n self.assertEqual(1, asset1_order.asset)\n\n # make sure the asset2 order did change\n # to 300 shares at 3.33\n self.assertEqual(300, asset2_order.amount)\n self.assertEqual(3.33, asset2_order.limit)\n self.assertEqual(2, asset2_order.asset)\n\n\nclass SimParamsTestCase(zf.WithTradingCalendars, zf.ZiplineTestCase):\n \"\"\"\n Tests for date management utilities in zipline.finance.trading.\n \"\"\"\n def test_simulation_parameters(self):\n sp = SimulationParameters(\n start_session=pd.Timestamp(\"2008-01-01\", tz='UTC'),\n end_session=pd.Timestamp(\"2008-12-31\", tz='UTC'),\n capital_base=100000,\n trading_calendar=self.trading_calendar,\n )\n\n self.assertTrue(sp.last_close.month == 12)\n self.assertTrue(sp.last_close.day == 31)\n\n @timed(DEFAULT_TIMEOUT)\n def test_sim_params_days_in_period(self):\n\n # January 2008\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n params = SimulationParameters(\n start_session=pd.Timestamp(\"2007-12-31\", tz='UTC'),\n end_session=pd.Timestamp(\"2008-01-07\", tz='UTC'),\n capital_base=100000,\n trading_calendar=self.trading_calendar,\n )\n\n expected_trading_days = (\n datetime(2007, 12, 31, tzinfo=pytz.utc),\n # Skip new years\n # holidays taken from: http://www.nyse.com/press/1191407641943.html\n datetime(2008, 1, 2, tzinfo=pytz.utc),\n datetime(2008, 1, 3, tzinfo=pytz.utc),\n datetime(2008, 1, 4, tzinfo=pytz.utc),\n # Skip Saturday\n # Skip Sunday\n datetime(2008, 1, 7, tzinfo=pytz.utc)\n )\n\n num_expected_trading_days = 5\n self.assertEquals(\n num_expected_trading_days,\n len(params.sessions)\n )\n np.testing.assert_array_equal(expected_trading_days,\n params.sessions.tolist())\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anonymous29387491/iclr2022
[ "60c5727f8519e64610b632d074510587fb7ff692", "60c5727f8519e64610b632d074510587fb7ff692" ]
[ "Tests/attribution_calculation/ShapleyExcess/iterate_drug.py", "Tests/attribution_calculation/Interaction-kernelshap/iterate_census_intKS.py" ]
[ "from torchvision import datasets, transforms\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom argparse import ArgumentParser \r\nfrom tqdm import tqdm\r\nimport time\r\nimport numpy as np\r\n\r\n\r\n###########\r\n# file imports / path issues\r\nimport os\r\nimport sys\r\nfrom pathlib import Path\r\n\r\npath = Path(os.path.abspath(__file__)).parents[3]\r\nos.chdir(path)\r\nsys.path.append('./BivariateShapley')\r\n\r\nfrom utils_shapley import *\r\nfrom shapley_kernel import Bivariate_KernelExplainer\r\n\r\nimport pickle\r\nimport os\r\n\r\n\r\nimport shap\r\n\r\n############################################\r\n# Define Test Parameters\r\n############################################\r\n\r\n\r\nparser = ArgumentParser(description='get phi plus matrices')\r\n\r\nparser.add_argument('--dataset_min_index', type = int,default=0,\r\n help='iterate over dataset starting from min_index')\r\n\r\nparser.add_argument('--dataset_samples', type = int,default=500,\r\n help='number of samples, starting from min_index')\r\nparser.add_argument('--verbose', action='store_true', default=False,\r\n help='boolean, use tqdm')\r\n\r\nargs = parser.parse_args()\r\n\r\nmin_index = args.dataset_min_index\r\nmax_index = min_index + args.dataset_samples\r\n\r\nbaseline = 'excess'\r\nsave_path = './Files/results_attribution/drug_%s' % (baseline)\r\nmake_dir(save_path)\r\nmodel_path = './Files/trained_bb_models/model_drug.pkl'\r\ndata_path = './Files/Data/drug.h5'\r\n\r\n\r\n\r\nfrom shapley_value_functions import *\r\n# load model\r\nimport pickle\r\nwith open(model_path, 'rb') as fid:\r\n model = pickle.load(fid)\r\nmodel_eval = eval_RF_binary(model)\r\n\r\n# Data Sample\r\nfrom shapley_datasets import drug\r\ndataset = drug(data_path = data_path, train = False)\r\ndataloader = DataLoader(dataset, batch_size = 1, shuffle = False, num_workers = 0)\r\n\r\ndataset_train = drug(data_path = data_path, train = True)\r\ndataloader_train = DataLoader(dataset_train, batch_size = 10, shuffle = True, num_workers = 0)\r\ndata_iterator = iter(dataloader_train)\r\n\r\n#######################\r\n# Explainer\r\n#######################\r\n\r\n# initialize variables\r\nx_list = []\r\nlabel_list = []\r\nunary_list = []\r\nmatrix_list = []\r\ntime_list = []\r\n\r\ndb_ind = {}\r\n\r\ntime1 = time.time()\r\nif args.verbose:\r\n batch_iterator = tqdm(enumerate(dataloader), total = max_index)\r\nelse:\r\n batch_iterator = enumerate(dataloader)\r\n\r\nfor idx, (x, label) in batch_iterator:\r\n\r\n # advance batch iterator\r\n if idx < min_index:\r\n continue\r\n elif idx == max_index:\r\n break\r\n\r\n time_start = time.time()\r\n label = label[0].item()\r\n #######################################\r\n # Calculate Shapley\r\n #######################################\r\n baseline_value = 0\r\n ########################################\r\n x = tensor2numpy(x) \r\n x_train = np.zeros_like(x)\r\n n_feat = x.reshape(-1).shape[0]\r\n matrix = np.zeros((n_feat, n_feat))\r\n\r\n model_eval.init_baseline(x, baseline_value = baseline_value)\r\n explainer = shap.KernelExplainer(model_eval, x_train) \r\n shapley_values = explainer.shap_values(x, silent = True, l1_reg = False)\r\n\r\n for i in range(n_feat):\r\n for j in range(i+1, n_feat):\r\n model_eval.init_baseline(x, j = j, i = i, baseline_value = baseline_value)\r\n x_ = np_collapse(x, index = j) # remove column j from x\r\n explainer = shap.KernelExplainer(model_eval, np.zeros_like(x_)+baseline_value)\r\n shapley_coalition 
= explainer.shap_values(x_, silent = True, l1_reg = False)\r\n shapley_coalition = np_insert(shapley_coalition, np.zeros((x.shape[0], 1)), index = j)\r\n\r\n matrix[i, j] = 0.5 * (shapley_coalition[0,i] - shapley_values[0,i] - shapley_values[0,j])\r\n matrix[j, i] = matrix[i,j]\r\n\r\n #######################################\r\n\r\n\r\n # save individual shapley\r\n time_list.append(time.time() - time_start)\r\n x_list.append(x)\r\n label_list.append(label)\r\n unary_list.append(shapley_values)\r\n matrix_list.append(matrix)\r\n\r\n\r\n\r\n if idx % 5 == 0:\r\n if not args.verbose:\r\n print('=====================')\r\n print('samples:' + str(idx+1))\r\n print('time per sample: ' + str(np.array(time_list).mean()))\r\n '''\r\n db_ind['x_list'] = x_list\r\n db_ind['label_list'] = label_list\r\n db_ind['unary_list'] = unary_list\r\n db_ind['matrix_list'] = matrix_list\r\n db_ind['time'] = time_list\r\n save_dict(db_ind, os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\n '''\r\n\r\ndb_ind['x_list'] = x_list\r\ndb_ind['label_list'] = label_list\r\ndb_ind['unary_list'] = unary_list\r\ndb_ind['matrix_list'] = matrix_list\r\ndb_ind['time_list'] = time_list\r\nsave_dict(db_ind, os.path.join(save_path, '%s-%s.pkl' % (str(min_index), str(max_index-1))))\r\n#os.remove(os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\nprint('done!')\r\n\r\n", "from torchvision import datasets, transforms\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom argparse import ArgumentParser \r\nfrom tqdm import tqdm\r\nimport time\r\nimport numpy as np\r\n\r\n\r\n###########\r\n# file imports / path issues\r\nimport os\r\nimport sys\r\nfrom pathlib import Path\r\n\r\npath = Path(os.path.abspath(__file__)).parents[3]\r\nos.chdir(path)\r\nsys.path.append('./BivariateShapley')\r\n\r\nfrom utils_shapley import *\r\nfrom shapley_kernel import Bivariate_KernelExplainer\r\n\r\nimport pickle\r\nimport os\r\n\r\n\r\nimport shap\r\n\r\n############################################\r\n# Define Test Parameters\r\n############################################\r\n\r\n\r\nparser = ArgumentParser(description='get phi plus matrices')\r\n\r\nparser.add_argument('--dataset_min_index', type = int,default=0,\r\n help='iterate over dataset starting from min_index')\r\n\r\nparser.add_argument('--dataset_samples', type = int,default=500,\r\n help='number of samples, starting from min_index')\r\nparser.add_argument('--verbose', action='store_true', default=False,\r\n help='boolean, use tqdm')\r\n\r\nargs = parser.parse_args()\r\n\r\nmin_index = args.dataset_min_index\r\nmax_index = min_index + args.dataset_samples\r\n\r\nbaseline = 'intKS'\r\nsave_path = './Files/results_attribution/census_%s' % (baseline)\r\nmake_dir(save_path)\r\nmodel_path = './Files/trained_bb_models/model_census.json'\r\n\r\n\r\n\r\nfrom shapley_value_functions import *\r\n# load model\r\nimport xgboost as xgb\r\nmodel = xgb.Booster()\r\nmodel.load_model(model_path)\r\nmodel_eval = eval_XGB(model)\r\n\r\n# Data Sample\r\nimport pandas as pd\r\ndataset_test = pd.read_pickle('./Files/Data/census_x_test.pkl')\r\nlabels_test = np.loadtxt('./Files/Data/census_y_test.csv')\r\ndataset_train = pd.read_pickle('./Files/Data/census_x_train.pkl')\r\n\r\n#######################\r\n# Explainer\r\n#######################\r\n\r\n# initialize variables\r\nx_list = []\r\nlabel_list = []\r\nunary_list = []\r\nmatrix_list = []\r\ntime_list = 
[]\r\n\r\ndb_ind = {}\r\n\r\nif args.verbose:\r\n iterator = tqdm(range(dataset_test.shape[0]), total = max_index - min_index)\r\nelse:\r\n iterator = range(dataset_test.shape[0])\r\n\r\ndataset_train = pd.read_pickle('./Files/Data/census_x_train.pkl')\r\nbaseline_value = dataset_train.to_numpy().mean(axis = 0).reshape(1,-1)\r\n\r\nfor idx in iterator:\r\n\r\n # advance batch iterator\r\n if idx < min_index:\r\n continue\r\n elif idx == max_index:\r\n break\r\n\r\n\r\n time_start = time.time()\r\n label = labels_test[idx]\r\n #######################################\r\n # Calculate Shapley\r\n #######################################\r\n x = dataset_test.iloc[idx:idx+1,:].to_numpy()\r\n\r\n ########################################\r\n x = tensor2numpy(x) \r\n n_feat = x.reshape(-1).shape[0]\r\n matrix = np.zeros((n_feat, n_feat))\r\n for j in range(n_feat):\r\n # j fixed to present\r\n model_eval.init_baseline(x, j = j, fixed_present = True, baseline_value = baseline_value)\r\n x_ = np_collapse(x, index = j) # remove column j from x\r\n\r\n if type(baseline_value) != int:\r\n baseline_value_ = np_collapse(baseline_value, index = j)\r\n else:\r\n baseline_value_ = baseline_value\r\n\r\n explainer = shap.KernelExplainer(model_eval, baseline_value_)\r\n shapley_values_pos = explainer.shap_values(x_, silent = True, l1_reg = False)\r\n shapley_values_pos = np_insert(shapley_values_pos, np.zeros((x.shape[0], 1)), index = j)\r\n\r\n # j fixed to be absent\r\n model_eval.init_baseline(x, j = j, fixed_present = False, baseine_value = baseline_value)\r\n x_ = np_collapse(x, index = j) # remove column j from x\r\n explainer = shap.KernelExplainer(model_eval,baseline_value_)\r\n shapley_values_neg = explainer.shap_values(x_, silent = True, l1_reg = False)\r\n shapley_values_neg = np_insert(shapley_values_neg, np.zeros((x.shape[0],1)), index = j)\r\n\r\n\r\n matrix[:, j] = 0.5 * (shapley_values_pos - shapley_values_neg)\r\n shapley_values = np.zeros(n_feat)\r\n #######################################\r\n matrix = 0.5*(matrix + matrix.transpose())\r\n\r\n # save individual shapley\r\n time_list.append(time.time() - time_start)\r\n x_list.append(x)\r\n label_list.append(label)\r\n unary_list.append(np.zeros(n_feat))\r\n matrix_list.append(matrix)\r\n\r\n\r\n\r\n if idx % 5 == 0:\r\n if not args.verbose:\r\n print('=====================')\r\n print('samples:' + str(idx+1))\r\n print('time per sample: ' + str(np.array(time_list).mean()))\r\n '''\r\n db_ind['x_list'] = x_list\r\n db_ind['label_list'] = label_list\r\n db_ind['unary_list'] = unary_list\r\n db_ind['matrix_list'] = matrix_list\r\n db_ind['time'] = time_list\r\n save_dict(db_ind, os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\n '''\r\n\r\ndb_ind['x_list'] = x_list\r\ndb_ind['label_list'] = label_list\r\ndb_ind['unary_list'] = unary_list\r\ndb_ind['matrix_list'] = matrix_list\r\ndb_ind['time_list'] = time_list\r\nsave_dict(db_ind, os.path.join(save_path, '%s-%s.pkl' % (str(min_index), str(max_index-1))))\r\n#os.remove(os.path.join(save_path, '%s-%s_checkpoint.pkl' % (str(min_index), str(max_index-1))))\r\nprint('done!')\r\n\r\n" ]
[ [ "numpy.array", "torch.utils.data.DataLoader", "numpy.zeros_like", "numpy.zeros" ], [ "numpy.array", "pandas.read_pickle", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
zhangyuejoslin/Recurrent-VLN-BERT
[ "f9bc81c297d6ad04b6b846b4d702a8f7bb4544ab", "f9bc81c297d6ad04b6b846b4d702a8f7bb4544ab" ]
[ "r2r_src_update/train.py", "r2r_src/vlnbert/vlnbert_PREVALENT.py" ]
[ "import torch\n\nimport os\nimport time\nimport json\nimport random\nimport numpy as np\nfrom collections import defaultdict\n\nfrom utils import read_vocab, write_vocab, build_vocab, padding_idx, timeSince, read_img_features, print_progress, roi_img_features\nimport utils\nfrom env import R2RBatch\nfrom agent import Seq2SeqAgent\nfrom eval import Evaluation\nfrom param import args\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom tensorboardX import SummaryWriter\n\nfrom vlnbert.vlnbert_init import get_tokenizer\n\nlog_dir = '/home/joslin/Recurrent-VLN-BERT/snap/%s' % args.name\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\nIMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'\nPLACE365_FEATURES = '/home/hlr/shared/data/joslin/img_features/ResNet-152-places365.tsv'\n#PLACE365_FEATURES = '/home/hlr/shared/data/joslin/img_features/CLIP-ViT-B-32-views.tsv'\nresult_path = \"/home/joslin/Recurrent-VLN-BERT/result/\"\nexperiment_time = time.strftime(\"%Y%m%d-%H%M%S\", time.gmtime())\n\nif args.features == 'imagenet':\n features = IMAGENET_FEATURES\nelif args.features == 'places365':\n features = PLACE365_FEATURES\n\nfeedback_method = args.feedback # teacher or sample\n\nprint(args); print('')\n\n\n''' train the listener '''\ndef train(train_env, tok, n_iters, log_every=2000, val_envs={}, aug_env=None):\n writer = SummaryWriter(log_dir=log_dir)\n listner = Seq2SeqAgent(train_env, \"\", tok, args.maxAction)\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write(str(args) + '\\n\\n')\n record_file.close()\n\n start_iter = 0\n if args.load is not None:\n if args.aug is None:\n start_iter = listner.load(os.path.join(args.load))\n print(\"\\nLOAD the model from {}, iteration \".format(args.load, start_iter))\n else:\n load_iter = listner.load(os.path.join(args.load))\n print(\"\\nLOAD the model from {}, iteration \".format(args.load, load_iter))\n\n start = time.time()\n print('\\nListener training starts, start iteration: %s' % str(start_iter))\n\n best_val = {'val_unseen': {\"spl\": 0., \"sr\": 0., \"state\":\"\", 'update':False}}\n\n for idx in range(start_iter, start_iter+n_iters, log_every):\n listner.logs = defaultdict(list)\n interval = min(log_every, n_iters-idx)\n iter = idx + interval\n\n # Train for log_every interval\n if aug_env is None:\n listner.env = train_env\n listner.train(interval, feedback=feedback_method) # Train interval iters\n else:\n jdx_length = len(range(interval // 2))\n for jdx in range(interval // 2):\n # Train with GT data\n listner.env = train_env\n args.ml_weight = 0.2\n listner.train(1, feedback=feedback_method)\n\n # Train with Augmented data\n listner.env = aug_env\n args.ml_weight = 0.2\n listner.train(1, feedback=feedback_method)\n\n print_progress(jdx, jdx_length, prefix='Progress:', suffix='Complete', bar_length=50)\n\n # Log the training stats to tensorboard\n total = max(sum(listner.logs['total']), 1)\n length = max(len(listner.logs['critic_loss']), 1)\n critic_loss = sum(listner.logs['critic_loss']) / total\n RL_loss = sum(listner.logs['RL_loss']) / max(len(listner.logs['RL_loss']), 1)\n IL_loss = sum(listner.logs['IL_loss']) / max(len(listner.logs['IL_loss']), 1)\n entropy = sum(listner.logs['entropy']) / total\n writer.add_scalar(\"loss/critic\", critic_loss, idx)\n writer.add_scalar(\"policy_entropy\", entropy, idx)\n writer.add_scalar(\"loss/RL_loss\", RL_loss, idx)\n writer.add_scalar(\"loss/IL_loss\", IL_loss, idx)\n writer.add_scalar(\"total_actions\", total, idx)\n 
writer.add_scalar(\"max_length\", length, idx)\n # print(\"total_actions\", total, \", max_length\", length)\n\n # Run validation\n loss_str = \"iter {}\".format(iter)\n for env_name, (env, evaluator) in val_envs.items():\n listner.env = env\n\n # Get validation distance from goal under test evaluation conditions\n listner.test(use_dropout=False, feedback='argmax', iters=None)\n result = listner.get_results()\n score_summary, _ = evaluator.score(result)\n loss_str += \", %s \" % env_name\n for metric, val in score_summary.items():\n if metric in ['spl']:\n writer.add_scalar(\"spl/%s\" % env_name, val, idx)\n if env_name in best_val:\n if val > best_val[env_name]['spl']:\n best_val[env_name]['spl'] = val\n best_val[env_name]['update'] = True\n elif (val == best_val[env_name]['spl']) and (score_summary['success_rate'] > best_val[env_name]['sr']):\n best_val[env_name]['spl'] = val\n best_val[env_name]['update'] = True\n loss_str += ', %s: %.4f' % (metric, val)\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write(loss_str + '\\n')\n record_file.close()\n\n for env_name in best_val:\n if best_val[env_name]['update']:\n best_val[env_name]['state'] = 'Iter %d %s' % (iter, loss_str)\n best_val[env_name]['update'] = False\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"best_%s\" % (env_name)))\n else:\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"latest_dict\"))\n\n print(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),\n iter, float(iter)/n_iters*100, loss_str)))\n\n with open(result_path+str(experiment_time)+\".txt\", \"a\") as f_result:\n f_result.write(('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),\n iter, float(iter)/n_iters*100, loss_str)))\n f_result.write('\\n')\n\n if iter % 1000 == 0:\n print(\"BEST RESULT TILL NOW\")\n for env_name in best_val:\n print(env_name, best_val[env_name]['state'])\n\n record_file = open('./logs/' + args.name + '.txt', 'a')\n record_file.write('BEST RESULT TILL NOW: ' + env_name + ' | ' + best_val[env_name]['state'] + '\\n')\n record_file.close()\n\n listner.save(idx, os.path.join(\"snap\", args.name, \"state_dict\", \"LAST_iter%d\" % (idx)))\n\n\ndef valid(train_env, tok, val_envs={}):\n agent = Seq2SeqAgent(train_env, \"\", tok, args.maxAction)\n\n print(\"Loaded the listener model at iter %d from %s\" % (agent.load(args.load), args.load))\n\n for env_name, (env, evaluator) in val_envs.items():\n agent.logs = defaultdict(list)\n agent.env = env\n\n iters = None\n agent.test(use_dropout=False, feedback='argmax', iters=iters)\n result = agent.get_results()\n\n if env_name != '':\n score_summary, _ = evaluator.score(result)\n loss_str = \"Env name: %s\" % env_name\n for metric,val in score_summary.items():\n loss_str += ', %s: %.4f' % (metric, val)\n print(loss_str)\n\n # if args.submit:\n json.dump(\n result,\n open(os.path.join(log_dir, \"submit_%s.json\" % env_name), 'w'),\n sort_keys=True, indent=4, separators=(',', ': ')\n )\n \n # YZ: print the sorrted tokens\n '''\n json.dump(\n agent.sort_tokens,\n open(os.path.join(log_dir, \"instr_%s.json\" % env_name), 'w'),\n sort_keys=True, indent=4, separators=(',', ': ')\n )\n '''\n # YZ: output the heatmap of transformer attention\n #np.save(\"/VL/space/zhan1624/Recurrent-VLN-BERT/attent_heatmap/mean/third_steps.npy\", agent.atten_heat, allow_pickle=True)\n # if env_name == \"val_seen\":\n # np.save(\"/VL/space/zhan1624/Recurrent-VLN-BERT/attent_heatmap/all/first_step_original.npy\", agent.obj_token_attn, 
allow_pickle=True)\n \n\ndef setup():\n torch.manual_seed(1)\n torch.cuda.manual_seed(1)\n random.seed(0)\n np.random.seed(0)\n\ndef train_val(test_only=False):\n ''' Train on the training set, and validate on seen and unseen splits. '''\n setup()\n tok = get_tokenizer(args)\n\n feat_dict = read_img_features(features, test_only=test_only)\n \n if args.using_obj:\n obj_dict = np.load(args.obj_img_feat_path, allow_pickle=True).item()\n else:\n obj_dict = None\n\n if test_only:\n featurized_scans = None\n val_env_names = ['val_train_seen']\n else:\n featurized_scans = set([key.split(\"_\")[0] for key in list(feat_dict.keys())])\n #val_env_names = ['val_train_seen', 'val_seen', 'val_unseen']\n val_env_names = ['val_seen', 'val_unseen']\n\n train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok, obj_store=obj_dict)\n from collections import OrderedDict\n\n if args.submit:\n val_env_names.append('test')\n else:\n pass\n\n val_envs = OrderedDict(\n ((split,\n (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok, obj_store=obj_dict),\n Evaluation([split], featurized_scans, tok))\n )\n for split in val_env_names\n )\n )\n\n if args.train == 'listener':\n train(train_env, tok, args.iters, val_envs=val_envs)\n elif args.train == 'validlistener':\n valid(train_env, tok, val_envs=val_envs)\n else:\n assert False\n\ndef train_val_augment(test_only=False):\n \"\"\"\n Train the listener with the augmented data\n \"\"\"\n setup()\n\n # Create a batch training environment that will also preprocess text\n tok_bert = get_tokenizer(args)\n\n # Load the env img features\n feat_dict = read_img_features(features, test_only=test_only)\n #feat_dict = roi_img_features(features)\n if test_only:\n featurized_scans = None\n val_env_names = ['val_train_seen']\n else:\n featurized_scans = set([key.split(\"_\")[0] for key in list(feat_dict.keys())])\n val_env_names = ['val_seen', 'val_unseen']\n\n # Load the augmentation data\n aug_path = args.aug\n # Create the training environment\n train_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=['train'], tokenizer=tok_bert)\n aug_env = R2RBatch(feat_dict, batch_size=args.batchSize, splits=[aug_path], tokenizer=tok_bert, name='aug')\n\n # Setup the validation data\n val_envs = {split: (R2RBatch(feat_dict, batch_size=args.batchSize, splits=[split], tokenizer=tok_bert),\n Evaluation([split], featurized_scans, tok_bert))\n for split in val_env_names}\n\n # Start training\n train(train_env, tok_bert, args.iters, val_envs=val_envs, aug_env=aug_env)\n\n\nif __name__ == \"__main__\":\n if args.train in ['listener', 'validlistener']:\n train_val(test_only=args.test_only)\n elif args.train == 'auglistener':\n train_val_augment(test_only=args.test_only)\n else:\n assert False\n", "# PREVALENT, 2020, [email protected]\n# Modified in Recurrent VLN-BERT, 2020, [email protected]\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport math\nimport os\nimport sys\nfrom io import open\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\nimport sys\nsys.path.append(\"/home/joslin/Recurrent-VLN-BERT/\")\nfrom transformers.pytorch_transformers.modeling_bert import BertPreTrainedModel, BertConfig\nimport pdb\n\nlogger = logging.getLogger(__name__)\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * 
(1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept (ImportError, AttributeError) as e:\n logger.info(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\")\nBertLayerNorm = torch.nn.LayerNorm\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n seq_length = input_ids.size(1)\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = True\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask, head_mask=None):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to 
get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,)\n return outputs\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask, head_mask=None):\n self_outputs = self.self(input_tensor, attention_mask, head_mask)\n attention_output = self.output(self_outputs[0], input_tensor)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask, 
head_mask=None):\n attention_outputs = self.attention(hidden_states, attention_mask, head_mask)\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertXAttention(nn.Module):\n def __init__(self, config, ctx_dim=None):\n super().__init__()\n self.att = BertOutAttention(config, ctx_dim=ctx_dim)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):\n output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask)\n attention_output = self.output(output, input_tensor)\n return attention_output, attention_scores\n\n\nclass BertOutAttention(nn.Module):\n def __init__(self, config, ctx_dim=None):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n # visual_dim = 2048\n if ctx_dim is None:\n ctx_dim =config.hidden_size\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(ctx_dim, self.all_head_size)\n self.value = nn.Linear(ctx_dim, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, context, attention_mask=None):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(context)\n mixed_value_layer = self.value(context)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n if attention_mask is not None:\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = 
context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer, attention_scores\n\n\nclass LXRTXLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n # Lang self-att and FFN layer\n self.lang_self_att = BertAttention(config)\n self.lang_inter = BertIntermediate(config)\n self.lang_output = BertOutput(config)\n # Visn self-att and FFN layer\n self.visn_self_att = BertAttention(config)\n self.visn_inter = BertIntermediate(config)\n self.visn_output = BertOutput(config)\n # The cross attention layer\n self.visual_attention = BertXAttention(config)\n\n def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):\n ''' Cross Attention -- cross for vision not for language '''\n visn_att_output, attention_scores = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)\n return visn_att_output, attention_scores\n\n def self_att(self, visn_input, visn_attention_mask):\n ''' Self Attention -- on visual features with language clues '''\n visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)\n return visn_att_output\n\n def output_fc(self, visn_input):\n ''' Feed forward '''\n visn_inter_output = self.visn_inter(visn_input)\n visn_output = self.visn_output(visn_inter_output, visn_input)\n return visn_output\n\n def forward(self, lang_feats, lang_attention_mask,\n visn_feats, visn_attention_mask, tdx):\n \n ''' visual self-attention with state '''\n visn_att_output = torch.cat((lang_feats[:, 0:1, :], visn_feats), dim=1)\n state_vis_mask = torch.cat((lang_attention_mask[:,:,:,0:1], visn_attention_mask), dim=-1)\n \n # ''' state and vision attend to language'''\n visn_att_output, cross_attention_scores = self.cross_att(lang_feats[:, 1:, :], lang_attention_mask[:, :, :, 1:], visn_att_output, state_vis_mask)\n\n language_attention_scores = cross_attention_scores[:, :, 0, :]\n\n state_visn_att_output = self.self_att(visn_att_output, state_vis_mask)\n state_visn_output = self.output_fc(state_visn_att_output[0])\n\n visn_att_output = state_visn_output[:, 1:, :]\n lang_att_output = torch.cat((state_visn_output[:, 0:1, :], lang_feats[:,1:,:]), dim=1)\n\n visual_attention_scores = state_visn_att_output[1][:, :, 0, 1:]\n\n return lang_att_output, visn_att_output, language_attention_scores, visual_attention_scores\n\n\nclass VisionEncoder(nn.Module):\n def __init__(self, vision_size, config):\n super().__init__()\n feat_dim = vision_size\n\n # Object feature encoding\n self.visn_fc = nn.Linear(feat_dim, config.hidden_size)\n self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)\n\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, visn_input):\n feats = visn_input\n\n x = self.visn_fc(feats)\n x = self.visn_layer_norm(x)\n\n output = self.dropout(x)\n return output\n\n\nclass VLNBert(BertPreTrainedModel):\n def __init__(self, config):\n super(VLNBert, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.pooler = BertPooler(config)\n\n self.img_dim = config.img_feature_dim # 2176\n logger.info('VLNBert Image Dimension: {}'.format(self.img_dim))\n self.img_feature_type = config.img_feature_type # ''\n self.vl_layers = config.vl_layers # 4\n self.la_layers = config.la_layers # 9\n self.lalayer = nn.ModuleList(\n [BertLayer(config) for _ in range(self.la_layers)])\n self.addlayer = nn.ModuleList(\n 
[LXRTXLayer(config) for _ in range(self.vl_layers)])\n self.vision_encoder = VisionEncoder(self.config.img_feature_dim, self.config)\n self.apply(self.init_weights)\n\n def forward(self, mode, input_ids, token_type_ids=None,\n attention_mask=None, lang_mask=None, vis_mask=None, position_ids=None, head_mask=None, img_feats=None):\n\n attention_mask = lang_mask\n\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n head_mask = [None] * self.config.num_hidden_layers\n\n if mode == 'language':\n ''' LXMERT language branch (in VLN only perform this at initialization) '''\n embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n text_embeds = embedding_output\n\n for layer_module in self.lalayer: # self attention\n temp_output = layer_module(text_embeds, extended_attention_mask)\n text_embeds = temp_output[0]\n\n sequence_output = text_embeds\n pooled_output = self.pooler(sequence_output)\n\n return pooled_output, sequence_output\n\n elif mode == 'visual':\n ''' LXMERT visual branch (no language processing during navigation) '''\n text_embeds = input_ids\n\n text_mask = extended_attention_mask\n\n img_embedding_output = self.vision_encoder(img_feats)\n img_seq_len = img_feats.shape[1]\n batch_size = text_embeds.size(0)\n\n img_seq_mask = vis_mask\n\n extended_img_mask = img_seq_mask.unsqueeze(1).unsqueeze(2)\n extended_img_mask = extended_img_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_img_mask = (1.0 - extended_img_mask) * -10000.0\n img_mask = extended_img_mask\n\n lang_output = text_embeds\n visn_output = img_embedding_output\n\n for tdx, layer_module in enumerate(self.addlayer):\n lang_output, visn_output, language_attention_scores, visual_attention_scores = layer_module(lang_output, text_mask, visn_output, img_mask, tdx)\n\n sequence_output = lang_output\n pooled_output = self.pooler(sequence_output)\n\n language_state_scores = language_attention_scores.mean(dim=1)\n visual_action_scores = visual_attention_scores.mean(dim=1)\n\n # weighted_feat\n language_attention_probs = nn.Softmax(dim=-1)(language_state_scores.clone()).unsqueeze(-1)\n visual_attention_probs = nn.Softmax(dim=-1)(visual_action_scores.clone()).unsqueeze(-1)\n\n attended_language = (language_attention_probs * text_embeds[:, 1:, :]).sum(1)\n attended_visual = (visual_attention_probs * img_embedding_output).sum(1)\n\n return pooled_output, visual_action_scores, attended_language, attended_visual, language_attention_probs.squeeze(-1)\n" ]
[ [ "torch.manual_seed", "numpy.load", "numpy.random.seed", "torch.cuda.manual_seed" ], [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.sigmoid", "torch.cat", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexjercan/unsupervised-segmentation
[ "172273fef52df3771d8de7c167fb0910f4079733" ]
[ "fcntest.py" ]
[ "from metrics import MetricFunctionNYUv2, print_single_error\nfrom model import SupervisedLossFunction\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom nyuv2 import NYUv2\nfrom tqdm import tqdm\nfrom general import generate_layers, load_checkpoint, tensors_to_device\nimport torch\nfrom torchvision.models.segmentation.segmentation import fcn_resnet50\n\nnum_layers = 3\n\n\ndef runmodel(model, imgs, depths):\n layers = generate_layers(imgs, depths, num_layers)\n x = [model(x)['out'] for x in layers]\n return torch.stack(x, dim=-1)\n\n\ndef run_test_nyuv2(model, dataloader, loss_fn, metric_fn):\n loop = tqdm(dataloader, position=0, leave=True)\n\n for i, tensors in enumerate(loop):\n imgs, seg13, normals, depths = tensors_to_device(tensors, DEVICE)\n with torch.no_grad():\n predictions = runmodel(model, imgs, depths)\n\n loss_fn(predictions, (normals, depths))\n metric_fn.evaluate(predictions, (seg13, normals, depths))\n loop.close()\n\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nmodel = fcn_resnet50(pretrained=False, num_classes=14)\nmodel = model.to(DEVICE)\nepoch_idx, model = load_checkpoint(model, \"fcnmodel.pth\", DEVICE)\n\nt = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])\ntest_dataset = NYUv2(root=\"../NYUv2\", download=True, rgb_transform=t, seg_transform=t, sn_transform=t, depth_transform=t, train=False)\ndataloader = DataLoader(test_dataset, batch_size=2, shuffle=True)\n\nloss_fn = SupervisedLossFunction()\nmetric_fn = MetricFunctionNYUv2(2)\n\nmodel.eval()\nrun_test_nyuv2(model, dataloader, loss_fn, metric_fn)\nprint_single_error(epoch_idx, loss_fn.show(), metric_fn.show())" ]
[ [ "torch.stack", "torch.no_grad", "torch.utils.data.DataLoader", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dccastro/NDFlow
[ "1e46cf00e78068d3c78281b42aa8aaed310e53c9" ]
[ "ndflow/util.py" ]
[ "import os\n\nimport numpy as np\n\nimport ndflow\nfrom ndflow.models.mixture import MixtureModel\n\n\ndef list_images(imgs_dir):\n import SimpleITK as sitk\n\n for filename in os.listdir(imgs_dir):\n path = os.path.join(imgs_dir, filename)\n reader = sitk.ImageFileReader()\n reader.SetFileName(path)\n try:\n reader.ReadImageInformation()\n yield filename\n except RuntimeError:\n continue # Probably not an image file, skip\n\n\ndef list_gmms(gmms_dir):\n return (filename for filename in os.listdir(gmms_dir)\n if filename.endswith(ndflow.GMM_FILENAME_SUFFIX))\n\n\ndef list_matches(matches_dir):\n return (filename for filename in os.listdir(matches_dir)\n if filename.endswith(ndflow.MATCH_FILENAME_SUFFIX))\n\n\ndef quantise(data, levels: int = None):\n \"\"\"Quantise data into discrete values, similarly to a histogram.\n\n Parameters\n ----------\n data : array_like\n Input data array.\n levels : int or None, optional\n Number of levels at which to quantise the data. If `None`, data will be cast to `int` and\n integer values in the data range will be used.\n\n Returns\n -------\n values : np.ndarray\n Values to which `data` was quantised.\n weights : np.ndarray\n Array of counts of items collapsed into each of the `values`.\n \"\"\"\n data = np.asarray(data).flatten()\n if levels is None:\n data = data.astype(int)\n data_min = data.min()\n weights = np.bincount(data - data_min)\n values = np.arange(len(weights), dtype=int) + data_min\n else:\n weights, bins = np.histogram(data, bins=levels, density=False)\n values = .5 * (bins[:-1] + bins[1:]) # Bin centres\n return values, weights\n\n\ndef plot_gmm(gmm: MixtureModel, x, values=None, weights=None, ax=None, **kwargs):\n \"\"\"Plot a Gaussian mixture model (GMM) density.\n\n Parameters\n ----------\n gmm : ndflow.models.mixture.MixtureModel\n x : array_like\n Values at which to evaluate the GMM likelihood.\n values, weights : np.ndarray, optional\n Quantised data distribution as computed by `quantise()`. If given, will plot a histogram\n alongside the GMM density.\n ax : matplotlib.axes.Axes, optional\n Axes onto which to draw. Defaults to the current axes.\n kwargs\n Keyword arguments passed through to the `plot()` call.\n \"\"\"\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n if values is not None and weights is not None:\n # Compute histogram bars' parameters in case values are not evenly spaced\n widths = np.empty(values.shape[0])\n widths[1:] = values[1:] - values[:-1]\n widths[0] = widths[1]\n edges = values - .5 * widths\n heights = weights / (weights.sum() * widths)\n\n ax.bar(edges, heights, widths, align='edge', linewidth=0, alpha=.5)\n\n ax.plot(x, gmm.marginal_likelihood(x), **kwargs)\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.asarray", "numpy.bincount", "numpy.histogram", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AIVIS-inc/mmsegmentation
[ "e2b13de52e970215be566067cab7bd880010f062" ]
[ "mmseg/models/backbones/resnet.py" ]
[ "import warnings\n\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer\nfrom mmcv.runner import BaseModule\nfrom mmcv.utils.parrots_wrapper import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\n\n\nclass BasicBlock(BaseModule):\n \"\"\"Basic block for ResNet.\"\"\"\n\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None,\n init_cfg=None):\n super(BasicBlock, self).__init__(init_cfg)\n assert dcn is None, 'Not implemented yet.'\n assert plugins is None, 'Not implemented yet.'\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg, planes, planes, 3, padding=1, bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.with_cp = with_cp\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(BaseModule):\n \"\"\"Bottleneck block for ResNet.\n\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if it is\n \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n dcn=None,\n plugins=None,\n init_cfg=None):\n super(Bottleneck, self).__init__(init_cfg)\n assert style in ['pytorch', 'caffe']\n assert dcn is None or isinstance(dcn, dict)\n assert plugins is None or isinstance(plugins, list)\n if plugins is not None:\n allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']\n assert all(p['position'] in allowed_position for p in plugins)\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.dcn = dcn\n self.with_dcn = dcn is not None\n self.plugins = plugins\n self.with_plugins = plugins is not None\n\n if self.with_plugins:\n # collect plugins for conv1/conv2/conv3\n self.after_conv1_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv1'\n ]\n self.after_conv2_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 
'after_conv2'\n ]\n self.after_conv3_plugins = [\n plugin['cfg'] for plugin in plugins\n if plugin['position'] == 'after_conv3'\n ]\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n fallback_on_stride = False\n if self.with_dcn:\n fallback_on_stride = dcn.pop('fallback_on_stride', False)\n if not self.with_dcn or fallback_on_stride:\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n else:\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n self.conv2 = build_conv_layer(\n dcn,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n if self.with_plugins:\n self.after_conv1_plugin_names = self.make_block_plugins(\n planes, self.after_conv1_plugins)\n self.after_conv2_plugin_names = self.make_block_plugins(\n planes, self.after_conv2_plugins)\n self.after_conv3_plugin_names = self.make_block_plugins(\n planes * self.expansion, self.after_conv3_plugins)\n\n def make_block_plugins(self, in_channels, plugins):\n \"\"\"make plugins for block.\n\n Args:\n in_channels (int): Input channels of plugin.\n plugins (list[dict]): List of plugins cfg to build.\n\n Returns:\n list[str]: List of the names of plugin.\n \"\"\"\n assert isinstance(plugins, list)\n plugin_names = []\n for plugin in plugins:\n plugin = plugin.copy()\n name, layer = build_plugin_layer(\n plugin,\n in_channels=in_channels,\n postfix=plugin.pop('postfix', ''))\n assert not hasattr(self, name), f'duplicate plugin {name}'\n self.add_module(name, layer)\n plugin_names.append(name)\n return plugin_names\n\n def forward_plugin(self, x, plugin_names):\n \"\"\"Forward function for plugins.\"\"\"\n out = x\n for name in plugin_names:\n out = getattr(self, name)(x)\n return out\n\n @property\n def norm1(self):\n \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n return getattr(self, self.norm3_name)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n out = self.conv3(out)\n out = 
self.norm3(out)\n\n if self.with_plugins:\n out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\[email protected]_module()\nclass ResNet(BaseModule):\n \"\"\"ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n stem_channels (int): Number of stem channels. Default: 64.\n base_channels (int): Number of base channels of res layer. Default: 64.\n num_stages (int): Resnet stages, normally 4. Default: 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n Default: (1, 2, 2, 2).\n dilations (Sequence[int]): Dilation of each stage.\n Default: (1, 1, 1, 1).\n out_indices (Sequence[int]): Output from which stages.\n Default: (0, 1, 2, 3).\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer. Default: 'pytorch'.\n deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.\n Default: False.\n avg_down (bool): Use AvgPool instead of stride conv when\n downsampling in the bottleneck. Default: False.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters. Default: -1.\n conv_cfg (dict | None): Dictionary to construct and config conv layer.\n When conv_cfg is None, cfg will be set to dict(type='Conv2d').\n Default: None.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n Default: dict(type='BN', requires_grad=True).\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n dcn (dict | None): Dictionary to construct and config DCN conv layer.\n When dcn is not None, conv_cfg must be None. Default: None.\n stage_with_dcn (Sequence[bool]): Whether to set DCN conv for each\n stage. The length of stage_with_dcn is equal to num_stages.\n Default: (False, False, False, False).\n plugins (list[dict]): List of plugins for stages, each dict contains:\n\n - cfg (dict, required): Cfg dict to build plugin.\n\n - position (str, required): Position inside block to insert plugin,\n options: 'after_conv1', 'after_conv2', 'after_conv3'.\n\n - stages (tuple[bool], optional): Stages to apply plugin, length\n should be same as 'num_stages'.\n Default: None.\n multi_grid (Sequence[int]|None): Multi grid dilation rates of last\n stage. Default: None.\n contract_dilation (bool): Whether contract first dilation of each layer\n Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n zero_init_residual (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity. Default: True.\n pretrained (str, optional): model pretrained path. Default: None.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None.\n\n Example:\n >>> from mmseg.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n in_channels=3,\n stem_channels=64,\n base_channels=64,\n num_stages=4,\n strides=(1, 2, 2, 2),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3),\n style='pytorch',\n deep_stem=False,\n avg_down=False,\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=False,\n dcn=None,\n stage_with_dcn=(False, False, False, False),\n plugins=None,\n multi_grid=None,\n contract_dilation=False,\n with_cp=False,\n zero_init_residual=True,\n pretrained=None,\n init_cfg=None):\n super(ResNet, self).__init__(init_cfg)\n if depth not in self.arch_settings:\n raise KeyError(f'invalid depth {depth} for resnet')\n\n self.pretrained = pretrained\n self.zero_init_residual = zero_init_residual\n block_init_cfg = None\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be setting at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is a deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is None:\n if init_cfg is None:\n self.init_cfg = [\n dict(type='Kaiming', layer='Conv2d'),\n dict(\n type='Constant',\n val=1,\n layer=['_BatchNorm', 'GroupNorm'])\n ]\n block = self.arch_settings[depth][0]\n if self.zero_init_residual:\n if block is BasicBlock:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm2'))\n elif block is Bottleneck:\n block_init_cfg = dict(\n type='Constant',\n val=0,\n override=dict(name='norm3'))\n else:\n raise TypeError('pretrained must be a str or None')\n\n self.depth = depth\n self.stem_channels = stem_channels\n self.base_channels = base_channels\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.strides = strides\n self.dilations = dilations\n assert len(strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages\n self.style = style\n self.deep_stem = deep_stem\n self.avg_down = avg_down\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n self.norm_eval = norm_eval\n self.dcn = dcn\n self.stage_with_dcn = stage_with_dcn\n if dcn is not None:\n assert len(stage_with_dcn) == num_stages\n self.plugins = plugins\n self.multi_grid = multi_grid\n self.contract_dilation = contract_dilation\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n self.inplanes = stem_channels\n\n self._make_stem_layer(in_channels, stem_channels)\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n stride = strides[i]\n dilation = dilations[i]\n dcn = self.dcn if self.stage_with_dcn[i] else None\n if plugins is not None:\n stage_plugins = self.make_stage_plugins(plugins, i)\n else:\n stage_plugins = None\n # multi grid is applied to last layer only\n stage_multi_grid = multi_grid if i == len(\n self.stage_blocks) - 1 else None\n planes = base_channels * 2**i\n res_layer = self.make_res_layer(\n block=self.block,\n inplanes=self.inplanes,\n planes=planes,\n num_blocks=num_blocks,\n stride=stride,\n dilation=dilation,\n style=self.style,\n 
avg_down=self.avg_down,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n dcn=dcn,\n plugins=stage_plugins,\n multi_grid=stage_multi_grid,\n contract_dilation=contract_dilation,\n init_cfg=block_init_cfg)\n self.inplanes = planes * self.block.expansion\n layer_name = f'layer{i+1}'\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self._freeze_stages()\n\n self.feat_dim = self.block.expansion * base_channels * 2**(\n len(self.stage_blocks) - 1)\n\n def make_stage_plugins(self, plugins, stage_idx):\n \"\"\"make plugins for ResNet 'stage_idx'th stage .\n\n Currently we support to insert 'context_block',\n 'empirical_attention_block', 'nonlocal_block' into the backbone like\n ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n Bottleneck.\n\n An example of plugins format could be :\n >>> plugins=[\n ... dict(cfg=dict(type='xxx', arg1='xxx'),\n ... stages=(False, True, True, True),\n ... position='after_conv2'),\n ... dict(cfg=dict(type='yyy'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='1'),\n ... stages=(True, True, True, True),\n ... position='after_conv3'),\n ... dict(cfg=dict(type='zzz', postfix='2'),\n ... stages=(True, True, True, True),\n ... position='after_conv3')\n ... ]\n >>> self = ResNet(depth=18)\n >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n >>> assert len(stage_plugins) == 3\n\n Suppose 'stage_idx=0', the structure of blocks in the stage would be:\n conv1-> conv2->conv3->yyy->zzz1->zzz2\n Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n If stages is missing, the plugin would be applied to all stages.\n\n Args:\n plugins (list[dict]): List of plugins cfg to build. 
The postfix is\n required if multiple same type plugins are inserted.\n stage_idx (int): Index of stage to build\n\n Returns:\n list[dict]: Plugins for current stage\n \"\"\"\n stage_plugins = []\n for plugin in plugins:\n plugin = plugin.copy()\n stages = plugin.pop('stages', None)\n assert stages is None or len(stages) == self.num_stages\n # whether to insert plugin into current stage\n if stages is None or stages[stage_idx]:\n stage_plugins.append(plugin)\n\n return stage_plugins\n\n def make_res_layer(self, **kwargs):\n \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n return ResLayer(**kwargs)\n\n @property\n def norm1(self):\n \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n return getattr(self, self.norm1_name)\n\n def _make_stem_layer(self, in_channels, stem_channels):\n \"\"\"Make stem layer for ResNet.\"\"\"\n if self.deep_stem:\n self.stem = nn.Sequential(\n build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels // 2,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels // 2,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n nn.ReLU(inplace=True),\n build_conv_layer(\n self.conv_cfg,\n stem_channels // 2,\n stem_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n build_norm_layer(self.norm_cfg, stem_channels)[1],\n nn.ReLU(inplace=True))\n else:\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n in_channels,\n stem_channels,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.norm1_name, norm1 = build_norm_layer(\n self.norm_cfg, stem_channels, postfix=1)\n self.add_module(self.norm1_name, norm1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def _freeze_stages(self):\n \"\"\"Freeze stages param and norm stats.\"\"\"\n if self.frozen_stages >= 0:\n if self.deep_stem:\n self.stem.eval()\n for param in self.stem.parameters():\n param.requires_grad = False\n else:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n if self.deep_stem:\n x = self.stem(x)\n else:\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n outs = []\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n if i in self.out_indices:\n outs.append(x)\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep normalization layer\n freezed.\"\"\"\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n\n\[email protected]_module()\nclass ResNetV1c(ResNet):\n \"\"\"ResNetV1c variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv\n in the input stem with three 3x3 convs.\n\n References:\n .. 
[1] https://arxiv.org/pdf/1812.01187.pdf\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1c, self).__init__(\n deep_stem=True, avg_down=False, **kwargs)\n\n\[email protected]_module()\nclass ResNetV1d(ResNet):\n \"\"\"ResNetV1d variant described in [1]_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n avg_pool with stride 2 is added before conv, whose stride is changed to 1.\n \"\"\"\n\n def __init__(self, **kwargs):\n super(ResNetV1d, self).__init__(\n deep_stem=True, avg_down=True, **kwargs)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.utils.checkpoint.checkpoint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edawson/parliament2
[ "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e", "2632aa3484ef64c9539c4885026b705b737f6d1e" ]
[ "resources/usr/local/lib/python2.7/dist-packages/sklearn/cluster/bicluster/spectral.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/cluster/k_means_.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/feature_selection/rfe.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/preprocessing/label.py", "resources/usr/lib/python2.7/dist-packages/numpy/numarray/numerictypes.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/manifold/isomap.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/tests/test_common.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/utils/validation.py", "resources/usr/lib/python2.7/dist-packages/numpy/matrixlib/tests/test_defmatrix.py", "resources/usr/local/lib/python2.7/dist-packages/sklearn/cluster/affinity_propagation_.py" ]
[ "\"\"\"Implements spectral biclustering algorithms.\n\nAuthors : Kemal Eren\nLicense: BSD 3 clause\n\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\n\nfrom scipy.sparse import dia_matrix\nfrom scipy.sparse import issparse\n\nfrom sklearn.base import BaseEstimator, BiclusterMixin\nfrom sklearn.externals import six\nfrom sklearn.utils.arpack import svds\nfrom sklearn.utils.arpack import eigsh\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import MiniBatchKMeans\n\nfrom sklearn.utils.extmath import randomized_svd\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.utils.extmath import make_nonnegative\nfrom sklearn.utils.extmath import norm\n\nfrom sklearn.utils.validation import assert_all_finite\nfrom sklearn.utils.validation import check_arrays\n\nfrom .utils import check_array_ndim\n\n\ndef _scale_normalize(X):\n \"\"\"Normalize ``X`` by scaling rows and columns independently.\n\n Returns the normalized matrix and the row and column scaling\n factors.\n\n \"\"\"\n X = make_nonnegative(X)\n row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()\n col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()\n row_diag = np.where(np.isnan(row_diag), 0, row_diag)\n col_diag = np.where(np.isnan(col_diag), 0, col_diag)\n if issparse(X):\n n_rows, n_cols = X.shape\n r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))\n c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))\n an = r * X * c\n else:\n an = row_diag[:, np.newaxis] * X * col_diag\n return an, row_diag, col_diag\n\n\ndef _bistochastic_normalize(X, max_iter=1000, tol=1e-5):\n \"\"\"Normalize rows and columns of ``X`` simultaneously so that all\n rows sum to one constant and all columns sum to a different\n constant.\n\n \"\"\"\n # According to paper, this can also be done more efficiently with\n # deviation reduction and balancing algorithms.\n X = make_nonnegative(X)\n X_scaled = X\n dist = None\n for _ in range(max_iter):\n X_new, _, _ = _scale_normalize(X_scaled)\n if issparse(X):\n dist = norm(X_scaled.data - X.data)\n else:\n dist = norm(X_scaled - X_new)\n X_scaled = X_new\n if dist is not None and dist < tol:\n break\n return X_scaled\n\n\ndef _log_normalize(X):\n \"\"\"Normalize ``X`` according to Kluger's log-interactions scheme.\"\"\"\n X = make_nonnegative(X, min_value=1)\n if issparse(X):\n raise ValueError(\"Cannot compute log of a sparse matrix,\"\n \" because log(x) diverges to -infinity as x\"\n \" goes to 0.\")\n L = np.log(X)\n row_avg = L.mean(axis=1)[:, np.newaxis]\n col_avg = L.mean(axis=0)\n avg = L.mean()\n return L - row_avg - col_avg + avg\n\n\nclass BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,\n BiclusterMixin)):\n \"\"\"Base class for spectral biclustering.\"\"\"\n\n @abstractmethod\n def __init__(self, n_clusters=3, svd_method=\"randomized\",\n n_svd_vecs=None, mini_batch=False, init=\"k-means++\",\n n_init=10, n_jobs=1, random_state=None):\n self.n_clusters = n_clusters\n self.svd_method = svd_method\n self.n_svd_vecs = n_svd_vecs\n self.mini_batch = mini_batch\n self.init = init\n self.n_init = n_init\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n def _check_parameters(self):\n legal_svd_methods = ('randomized', 'arpack')\n if self.svd_method not in legal_svd_methods:\n raise ValueError(\"Unknown SVD method: '{}'. 
svd_method must be\"\n \" one of {}.\".format(self.svd_method,\n legal_svd_methods))\n\n def fit(self, X):\n \"\"\"Creates a biclustering for X.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n\n \"\"\"\n X, = check_arrays(X, sparse_format='csr', dtype=np.float64)\n check_array_ndim(X)\n self._check_parameters()\n self._fit(X)\n\n def _svd(self, array, n_components, n_discard):\n \"\"\"Returns first `n_components` left and right singular\n vectors u and v, discarding the first `n_discard`.\n\n \"\"\"\n if self.svd_method == 'randomized':\n kwargs = {}\n if self.n_svd_vecs is not None:\n kwargs['n_oversamples'] = self.n_svd_vecs\n u, _, vt = randomized_svd(array, n_components,\n random_state=self.random_state,\n **kwargs)\n\n elif self.svd_method == 'arpack':\n u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)\n if np.any(np.isnan(vt)):\n # some eigenvalues of A * A.T are negative, causing\n # sqrt() to be np.nan. This causes some vectors in vt\n # to be np.nan.\n _, v = eigsh(safe_sparse_dot(array.T, array),\n ncv=self.n_svd_vecs)\n vt = v.T\n if np.any(np.isnan(u)):\n _, u = eigsh(safe_sparse_dot(array, array.T),\n ncv=self.n_svd_vecs)\n\n assert_all_finite(u)\n assert_all_finite(vt)\n u = u[:, n_discard:]\n vt = vt[n_discard:]\n return u, vt.T\n\n def _k_means(self, data, n_clusters):\n if self.mini_batch:\n model = MiniBatchKMeans(n_clusters,\n init=self.init,\n n_init=self.n_init,\n random_state=self.random_state)\n else:\n model = KMeans(n_clusters, init=self.init,\n n_init=self.n_init, n_jobs=self.n_jobs,\n random_state=self.random_state)\n model.fit(data)\n centroid = model.cluster_centers_\n labels = model.labels_\n return centroid, labels\n\n\nclass SpectralCoclustering(BaseSpectral):\n \"\"\"Spectral Co-Clustering algorithm (Dhillon, 2001).\n\n Clusters rows and columns of an array `X` to solve the relaxed\n normalized cut of the bipartite graph created from `X` as follows:\n the edge between row vertex `i` and column vertex `j` has weight\n `X[i, j]`.\n\n The resulting bicluster structure is block-diagonal, since each\n row and each column belongs to exactly one bicluster.\n\n Supports sparse matrices, as long as they are nonnegative.\n\n Parameters\n ----------\n n_clusters : integer, optional, default: 3\n The number of biclusters to find.\n\n svd_method : string, optional, default: 'randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', use\n :func:`sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. If 'arpack', use\n :func:`sklearn.utils.arpack.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, optional, default: None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, optional, default: False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, optional, default: 10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. 
Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n n_jobs : int, optional, default: 1\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debuging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n random_state : int seed, RandomState instance, or None (default)\n A pseudo random number generator used by the K-Means\n initialization.\n\n Attributes\n ----------\n `rows_` : array-like, shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if cluster `i`\n contains row `r`. Available only after calling ``fit``.\n\n `columns_` : array-like, shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n `row_labels_` : array-like, shape (n_rows,)\n The bicluster label of each row.\n\n `column_labels_` : array-like, shape (n_cols,)\n The bicluster label of each column.\n\n References\n ----------\n\n * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using\n bipartite spectral graph partitioning\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.\n\n \"\"\"\n def __init__(self, n_clusters=3, svd_method='randomized',\n n_svd_vecs=None, mini_batch=False, init='k-means++',\n n_init=10, n_jobs=1, random_state=None):\n super(SpectralCoclustering, self).__init__(n_clusters,\n svd_method,\n n_svd_vecs,\n mini_batch,\n init,\n n_init,\n n_jobs,\n random_state)\n\n def _fit(self, X):\n normalized_data, row_diag, col_diag = _scale_normalize(X)\n n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))\n u, v = self._svd(normalized_data, n_sv, n_discard=1)\n z = np.vstack((row_diag[:, np.newaxis] * u,\n col_diag[:, np.newaxis] * v))\n\n _, labels = self._k_means(z, self.n_clusters)\n\n n_rows = X.shape[0]\n self.row_labels_ = labels[:n_rows]\n self.column_labels_ = labels[n_rows:]\n\n self.rows_ = np.vstack(self.row_labels_ == c\n for c in range(self.n_clusters))\n self.columns_ = np.vstack(self.column_labels_ == c\n for c in range(self.n_clusters))\n\n\nclass SpectralBiclustering(BaseSpectral):\n \"\"\"Spectral biclustering (Kluger, 2003).\n\n Partitions rows and columns under the assumption that the data has\n an underlying checkerboard structure. For instance, if there are\n two row partitions and three column partitions, each row will\n belong to three biclusters, and each column will belong to two\n biclusters. The outer product of the corresponding row and column\n label vectors gives this checkerboard structure.\n\n Parameters\n ----------\n n_clusters : integer or tuple (n_row_clusters, n_column_clusters)\n The number of row and column clusters in the checkerboard\n structure.\n\n method : string, optional, default: 'bistochastic'\n Method of normalizing and converting singular vectors into\n biclusters. May be one of 'scale', 'bistochastic', or 'log'.\n The authors recommend using 'log'. If the data is sparse,\n however, log normalization will not work, which is why the\n default is 'bistochastic'. 
CAUTION: if `method='log'`, the\n data must not be sparse.\n\n n_components : integer, optional, default: 6\n Number of singular vectors to check.\n\n n_best : integer, optional, default: 3\n Number of best singular vectors to which to project the data\n for clustering.\n\n svd_method : string, optional, default: 'randomized'\n Selects the algorithm for finding singular vectors. May be\n 'randomized' or 'arpack'. If 'randomized', uses\n `sklearn.utils.extmath.randomized_svd`, which may be faster\n for large matrices. If 'arpack', uses\n `sklearn.utils.arpack.svds`, which is more accurate, but\n possibly slower in some cases.\n\n n_svd_vecs : int, optional, default: None\n Number of vectors to use in calculating the SVD. Corresponds\n to `ncv` when `svd_method=arpack` and `n_oversamples` when\n `svd_method` is 'randomized`.\n\n mini_batch : bool, optional, default: False\n Whether to use mini-batch k-means, which is faster but may get\n different results.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization of k-means algorithm; defaults to\n 'k-means++'.\n\n n_init : int, optional, default: 10\n Number of random initializations that are tried with the\n k-means algorithm.\n\n If mini-batch k-means is used, the best initialization is\n chosen and the algorithm runs once. Otherwise, the algorithm\n is run for each initialization and the best solution chosen.\n\n n_jobs : int, optional, default: 1\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debuging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n random_state : int seed, RandomState instance, or None (default)\n A pseudo random number generator used by the K-Means\n initialization.\n\n Attributes\n ----------\n `rows_` : array-like, shape (n_row_clusters, n_rows)\n Results of the clustering. `rows[i, r]` is True if cluster `i`\n contains row `r`. Available only after calling ``fit``.\n\n `columns_` : array-like, shape (n_column_clusters, n_columns)\n Results of the clustering, like `rows`.\n\n `row_labels_` : array-like, shape (n_rows,)\n Row partition labels.\n\n `column_labels_` : array-like, shape (n_cols,)\n Column partition labels.\n\n References\n ----------\n\n * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray\n data: coclustering genes and conditions\n <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.\n\n \"\"\"\n def __init__(self, n_clusters=3, method='bistochastic',\n n_components=6, n_best=3, svd_method='randomized',\n n_svd_vecs=None, mini_batch=False, init='k-means++',\n n_init=10, n_jobs=1, random_state=None):\n super(SpectralBiclustering, self).__init__(n_clusters,\n svd_method,\n n_svd_vecs,\n mini_batch,\n init,\n n_init,\n n_jobs,\n random_state)\n self.method = method\n self.n_components = n_components\n self.n_best = n_best\n\n def _check_parameters(self):\n super(SpectralBiclustering, self)._check_parameters()\n legal_methods = ('bistochastic', 'scale', 'log')\n if self.method not in legal_methods:\n raise ValueError(\"Unknown method: '{}'. 
method must be\"\n \" one of {}.\".format(self.method, legal_methods))\n try:\n int(self.n_clusters)\n except TypeError:\n try:\n r, c = self.n_clusters\n int(r)\n int(c)\n except (ValueError, TypeError):\n raise ValueError(\"Incorrect parameter n_clusters has value:\"\n \" {}. It should either be a single integer\"\n \" or an iterable with two integers:\"\n \" (n_row_clusters, n_column_clusters)\")\n if self.n_components < 1:\n raise ValueError(\"Parameter n_components must be greater than 0,\"\n \" but its value is {}\".format(self.n_components))\n if self.n_best < 1:\n raise ValueError(\"Parameter n_best must be greater than 0,\"\n \" but its value is {}\".format(self.n_best))\n if self.n_best > self.n_components:\n raise ValueError(\"n_best cannot be larger than\"\n \" n_components, but {} > {}\"\n \"\".format(self.n_best, self.n_components))\n\n def _fit(self, X):\n n_sv = self.n_components\n if self.method == 'bistochastic':\n normalized_data = _bistochastic_normalize(X)\n n_sv += 1\n elif self.method == 'scale':\n normalized_data, _, _ = _scale_normalize(X)\n n_sv += 1\n elif self.method == 'log':\n normalized_data = _log_normalize(X)\n n_discard = 0 if self.method == 'log' else 1\n u, v = self._svd(normalized_data, n_sv, n_discard)\n ut = u.T\n vt = v.T\n\n try:\n n_row_clusters, n_col_clusters = self.n_clusters\n except TypeError:\n n_row_clusters = n_col_clusters = self.n_clusters\n\n best_ut = self._fit_best_piecewise(ut, self.n_best,\n n_row_clusters)\n\n best_vt = self._fit_best_piecewise(vt, self.n_best,\n n_col_clusters)\n\n self.row_labels_ = self._project_and_cluster(X, best_vt.T,\n n_row_clusters)\n\n self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,\n n_col_clusters)\n\n self.rows_ = np.vstack(self.row_labels_ == label\n for label in range(n_row_clusters)\n for _ in range(n_col_clusters))\n self.columns_ = np.vstack(self.column_labels_ == label\n for _ in range(n_row_clusters)\n for label in range(n_col_clusters))\n\n def _fit_best_piecewise(self, vectors, n_best, n_clusters):\n \"\"\"Find the ``n_best`` vectors that are best approximated by piecewise\n constant vectors.\n\n The piecewise vectors are found by k-means; the best is chosen\n according to Euclidean distance.\n\n \"\"\"\n def make_piecewise(v):\n centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)\n return centroid[labels].ravel()\n piecewise_vectors = np.apply_along_axis(make_piecewise,\n axis=1, arr=vectors)\n dists = np.apply_along_axis(norm, axis=1,\n arr=(vectors - piecewise_vectors))\n result = vectors[np.argsort(dists)[:n_best]]\n return result\n\n def _project_and_cluster(self, data, vectors, n_clusters):\n \"\"\"Project ``data`` to ``vectors`` and cluster the result.\"\"\"\n projected = safe_sparse_dot(data, vectors)\n _, labels = self._k_means(projected, n_clusters)\n return labels\n", "\"\"\"K-means clustering\"\"\"\n\n# Authors: Gael Varoquaux <[email protected]>\n# Thomas Rueckstiess <[email protected]>\n# James Bergstra <[email protected]>\n# Jan Schlueter <[email protected]>\n# Nelle Varoquaux\n# Peter Prettenhofer <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Robert Layton <[email protected]>\n# License: BSD 3 clause\n\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom ..base import BaseEstimator, ClusterMixin, TransformerMixin\nfrom ..metrics.pairwise import euclidean_distances\nfrom ..utils.sparsefuncs import assign_rows_csr, mean_variance_axis0\nfrom ..utils import check_arrays\nfrom 
..utils import check_random_state\nfrom ..utils import atleast2d_or_csr\nfrom ..utils import as_float_array\nfrom ..externals.joblib import Parallel\nfrom ..externals.joblib import delayed\n\nfrom . import _k_means\n\n\n###############################################################################\n# Initialization heuristic\n\n\ndef _k_init(X, n_clusters, n_local_trials=None, random_state=None,\n x_squared_norms=None):\n \"\"\"Init n_clusters seeds according to k-means++\n\n Parameters\n -----------\n X: array or sparse matrix, shape (n_samples, n_features)\n The data to pick seeds for. To avoid memory copy, the input data\n should be double precision (dtype=np.float64).\n\n n_clusters: integer\n The number of seeds to choose\n\n n_local_trials: integer, optional\n The number of seeding trials for each center (except the first),\n of which the one reducing inertia the most is greedily chosen.\n Set to None to make the number of trials depend logarithmically\n on the number of seeds (2+log(k)); this is the default.\n\n random_state: integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n x_squared_norms: array, shape (n_samples,), optional\n Squared euclidean norm of each data point. Pass it if you have it at\n hands already to avoid it being recomputed here. Default: None\n\n Notes\n -----\n Selects initial cluster centers for k-mean clustering in a smart way\n to speed up convergence. see: Arthur, D. and Vassilvitskii, S.\n \"k-means++: the advantages of careful seeding\". ACM-SIAM symposium\n on Discrete algorithms. 2007\n\n Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,\n which is the implementation used in the aforementioned paper.\n \"\"\"\n n_samples, n_features = X.shape\n random_state = check_random_state(random_state)\n\n centers = np.empty((n_clusters, n_features))\n\n # Set the number of local seeding trials if none is given\n if n_local_trials is None:\n # This is what Arthur/Vassilvitskii tried, but did not report\n # specific results for other than mentioning in the conclusion\n # that it helped.\n n_local_trials = 2 + int(np.log(n_clusters))\n\n # Pick first center randomly\n center_id = random_state.randint(n_samples)\n if sp.issparse(X):\n centers[0] = X[center_id].toarray()\n else:\n centers[0] = X[center_id]\n\n # Initialize list of closest distances and calculate current potential\n if x_squared_norms is None:\n x_squared_norms = _squared_norms(X)\n closest_dist_sq = euclidean_distances(\n centers[0], X, Y_norm_squared=x_squared_norms, squared=True)\n current_pot = closest_dist_sq.sum()\n\n # Pick the remaining n_clusters-1 points\n for c in range(1, n_clusters):\n # Choose center candidates by sampling with probability proportional\n # to the squared distance to the closest existing center\n rand_vals = random_state.random_sample(n_local_trials) * current_pot\n candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)\n\n # Compute distances to center candidates\n distance_to_candidates = euclidean_distances(\n X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)\n\n # Decide which candidate is the best\n best_candidate = None\n best_pot = None\n best_dist_sq = None\n for trial in range(n_local_trials):\n # Compute potential when including center candidate\n new_dist_sq = np.minimum(closest_dist_sq,\n distance_to_candidates[trial])\n new_pot = new_dist_sq.sum()\n\n # Store result if it 
is the best local trial so far\n if (best_candidate is None) or (new_pot < best_pot):\n best_candidate = candidate_ids[trial]\n best_pot = new_pot\n best_dist_sq = new_dist_sq\n\n # Permanently add best center candidate found in local tries\n if sp.issparse(X):\n centers[c] = X[best_candidate].toarray()\n else:\n centers[c] = X[best_candidate]\n current_pot = best_pot\n closest_dist_sq = best_dist_sq\n\n return centers\n\n\n###############################################################################\n# K-means batch estimation by EM (expectation maximization)\n\n\ndef _tolerance(X, tol):\n \"\"\"Return a tolerance which is independent of the dataset\"\"\"\n if sp.issparse(X):\n variances = mean_variance_axis0(X)[1]\n else:\n variances = np.var(X, axis=0)\n return np.mean(variances) * tol\n\n\ndef k_means(X, n_clusters, init='k-means++', precompute_distances=True,\n n_init=10, max_iter=300, verbose=False,\n tol=1e-4, random_state=None, copy_x=True, n_jobs=1):\n \"\"\"K-means clustering algorithm.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n The observations to cluster.\n\n n_clusters : int\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter : int, optional, default 300\n Maximum number of iterations of the k-means algorithm to run.\n\n n_init : int, optional, default: 10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n\n init : {'k-means++', 'random', or ndarray, or a callable}, optional\n Method for initialization, default to 'k-means++':\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n\n 'random': generate k centroids from a Gaussian with mean and\n variance estimated from the data.\n\n If an ndarray is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n If a callable is passed, it should take arguments X, k and\n and a random state and return an initialization.\n\n tol : float, optional\n The relative increment in the results before declaring convergence.\n\n verbose : boolean, optional\n Verbosity mode.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n copy_x : boolean, optional\n When pre-computing distances it is more numerically accurate to center\n the data first. If copy_x is True, then the original data is not\n modified. If False, the original data is modified, and put back before\n the function returns, but small numerical differences may be introduced\n by subtracting and then adding the data mean.\n\n n_jobs : int\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. 
Thus for n_jobs = -2, all CPUs but one\n are used.\n\n Returns\n -------\n centroid : float ndarray with shape (k, n_features)\n Centroids found at the last iteration of k-means.\n\n label : integer ndarray with shape (n_samples,)\n label[i] is the code or index of the centroid the\n i'th observation is closest to.\n\n inertia : float\n The final value of the inertia criterion (sum of squared distances to\n the closest centroid for all observations in the training set).\n\n \"\"\"\n random_state = check_random_state(random_state)\n\n best_inertia = np.infty\n X = as_float_array(X, copy=copy_x)\n tol = _tolerance(X, tol)\n\n # subtract of mean of x for more accurate distance computations\n if not sp.issparse(X) or hasattr(init, '__array__'):\n X_mean = X.mean(axis=0)\n if not sp.issparse(X):\n if copy_x:\n X = X.copy()\n X -= X_mean\n\n if hasattr(init, '__array__'):\n init = np.asarray(init).copy()\n init -= X_mean\n if not n_init == 1:\n warnings.warn(\n 'Explicit initial center position passed: '\n 'performing only one init in the k-means instead of %d'\n % n_init, RuntimeWarning, stacklevel=2)\n n_init = 1\n\n # precompute squared norms of data points\n x_squared_norms = _squared_norms(X)\n\n best_labels, best_inertia, best_centers = None, None, None\n if n_jobs == 1:\n # For a single thread, less memory is needed if we just store one set\n # of the best results (as opposed to one set per run per thread).\n for it in range(n_init):\n # run a k-means once\n labels, inertia, centers = _kmeans_single(\n X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,\n precompute_distances=precompute_distances, tol=tol,\n x_squared_norms=x_squared_norms, random_state=random_state)\n # determine if these results are the best so far\n if best_inertia is None or inertia < best_inertia:\n best_labels = labels.copy()\n best_centers = centers.copy()\n best_inertia = inertia\n else:\n # parallelisation of k-means runs\n seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)\n results = Parallel(n_jobs=n_jobs, verbose=0)(\n delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,\n init=init, verbose=verbose, tol=tol,\n precompute_distances=precompute_distances,\n x_squared_norms=x_squared_norms,\n # Change seed to ensure variety\n random_state=seed)\n for seed in seeds)\n # Get results with the lowest inertia\n labels, inertia, centers = zip(*results)\n best = np.argmin(inertia)\n best_labels = labels[best]\n best_inertia = inertia[best]\n best_centers = centers[best]\n if not sp.issparse(X):\n if not copy_x:\n X += X_mean\n best_centers += X_mean\n\n return best_centers, best_labels, best_inertia\n\n\ndef _kmeans_single(X, n_clusters, max_iter=300, init='k-means++',\n verbose=False, x_squared_norms=None, random_state=None,\n tol=1e-4, precompute_distances=True):\n \"\"\"A single run of k-means, assumes preparation completed prior.\n\n Parameters\n ----------\n X: array-like of floats, shape (n_samples, n_features)\n The observations to cluster.\n\n n_clusters: int\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter: int, optional, default 300\n Maximum number of iterations of the k-means algorithm to run.\n\n init: {'k-means++', 'random', or ndarray, or a callable}, optional\n Method for initialization, default to 'k-means++':\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. 
See section\n Notes in k_init for more details.\n\n 'random': generate k centroids from a Gaussian with mean and\n variance estimated from the data.\n\n If an ndarray is passed, it should be of shape (k, p) and gives\n the initial centers.\n\n If a callable is passed, it should take arguments X, k and\n and a random state and return an initialization.\n\n tol: float, optional\n The relative increment in the results before declaring convergence.\n\n verbose: boolean, optional\n Verbosity mode\n\n x_squared_norms: array, optional\n Precomputed x_squared_norms. Calculated if not given.\n\n random_state: integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n Returns\n -------\n centroid: float ndarray with shape (k, n_features)\n Centroids found at the last iteration of k-means.\n\n label: integer ndarray with shape (n_samples,)\n label[i] is the code or index of the centroid the\n i'th observation is closest to.\n\n inertia: float\n The final value of the inertia criterion (sum of squared distances to\n the closest centroid for all observations in the training set).\n \"\"\"\n random_state = check_random_state(random_state)\n if x_squared_norms is None:\n x_squared_norms = _squared_norms(X)\n best_labels, best_inertia, best_centers = None, None, None\n # init\n centers = _init_centroids(X, n_clusters, init, random_state=random_state,\n x_squared_norms=x_squared_norms)\n if verbose:\n print('Initialization complete')\n\n # Allocate memory to store the distances for each sample to its\n # closer center for reallocation in case of ties\n distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)\n\n # iterations\n for i in range(max_iter):\n centers_old = centers.copy()\n # labels assignment is also called the E-step of EM\n labels, inertia = \\\n _labels_inertia(X, x_squared_norms, centers,\n precompute_distances=precompute_distances,\n distances=distances)\n\n # computation of the means is also called the M-step of EM\n if sp.issparse(X):\n centers = _k_means._centers_sparse(X, labels, n_clusters,\n distances)\n else:\n centers = _k_means._centers_dense(X, labels, n_clusters, distances)\n\n if verbose:\n print('Iteration %2d, inertia %.3f' % (i, inertia))\n\n if best_inertia is None or inertia < best_inertia:\n best_labels = labels.copy()\n best_centers = centers.copy()\n best_inertia = inertia\n\n if np.sum((centers_old - centers) ** 2) < tol:\n if verbose:\n print(\"Converged at iteration %d\" % i)\n break\n return best_labels, best_inertia, best_centers\n\n\ndef _squared_norms(X):\n \"\"\"Compute the squared euclidean norms of the rows of X\"\"\"\n if sp.issparse(X):\n return _k_means.csr_row_norm_l2(X, squared=True)\n else:\n # TODO: implement a cython version to avoid the memory copy of the\n # input data\n return (X ** 2).sum(axis=1)\n\n\ndef _labels_inertia_precompute_dense(X, x_squared_norms, centers):\n n_samples = X.shape[0]\n k = centers.shape[0]\n distances = euclidean_distances(centers, X, x_squared_norms,\n squared=True)\n labels = np.empty(n_samples, dtype=np.int32)\n labels.fill(-1)\n mindist = np.empty(n_samples)\n mindist.fill(np.infty)\n for center_id in range(k):\n dist = distances[center_id]\n labels[dist < mindist] = center_id\n mindist = np.minimum(dist, mindist)\n inertia = mindist.sum()\n return labels, inertia\n\n\ndef _labels_inertia(X, x_squared_norms, centers,\n precompute_distances=True, distances=None):\n \"\"\"E step of the 
K-means EM algorithm\n\n Compute the labels and the inertia of the given samples and centers\n\n Parameters\n ----------\n X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)\n The input samples to assign to the labels.\n\n x_squared_norms: array, shape (n_samples,)\n Precomputed squared euclidean norm of each data point, to speed up\n computations.\n\n centers: float64 array, shape (k, n_features)\n The cluster centers.\n\n distances: float64 array, shape (n_samples,)\n Distances for each sample to its closest center.\n\n Returns\n -------\n labels: int array of shape(n)\n The resulting assignment\n\n inertia: float\n The value of the inertia criterion with the assignment\n \"\"\"\n n_samples = X.shape[0]\n # set the default value of centers to -1 to be able to detect any anomaly\n # easily\n labels = - np.ones(n_samples, np.int32)\n if distances is None:\n distances = np.zeros(shape=(0,), dtype=np.float64)\n if sp.issparse(X):\n inertia = _k_means._assign_labels_csr(\n X, x_squared_norms, centers, labels, distances=distances)\n else:\n if precompute_distances:\n return _labels_inertia_precompute_dense(X, x_squared_norms,\n centers)\n inertia = _k_means._assign_labels_array(\n X, x_squared_norms, centers, labels, distances=distances)\n return labels, inertia\n\n\ndef _init_centroids(X, k, init, random_state=None, x_squared_norms=None,\n init_size=None):\n \"\"\"Compute the initial centroids\n\n Parameters\n ----------\n\n X: array, shape (n_samples, n_features)\n\n k: int\n number of centroids\n\n init: {'k-means++', 'random' or ndarray or callable} optional\n Method for initialization\n\n random_state: integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n x_squared_norms: array, shape (n_samples,), optional\n Squared euclidean norm of each data point. Pass it if you have it at\n hands already to avoid it being recomputed here. Default: None\n\n init_size : int, optional\n Number of samples to randomly sample for speeding up the\n initialization (sometimes at the expense of accuracy): the\n only algorithm is initialized by running a batch KMeans on a\n random subset of the data. This needs to be larger than k.\n\n Returns\n -------\n centers: array, shape(k, n_features)\n \"\"\"\n random_state = check_random_state(random_state)\n n_samples = X.shape[0]\n\n if init_size is not None and init_size < n_samples:\n if init_size < k:\n warnings.warn(\n \"init_size=%d should be larger than k=%d. 
\"\n \"Setting it to 3*k\" % (init_size, k),\n RuntimeWarning, stacklevel=2)\n init_size = 3 * k\n init_indices = random_state.random_integers(\n 0, n_samples - 1, init_size)\n X = X[init_indices]\n x_squared_norms = x_squared_norms[init_indices]\n n_samples = X.shape[0]\n elif n_samples < k:\n raise ValueError(\n \"n_samples=%d should be larger than k=%d\" % (n_samples, k))\n\n if init == 'k-means++':\n centers = _k_init(X, k, random_state=random_state,\n x_squared_norms=x_squared_norms)\n elif init == 'random':\n seeds = random_state.permutation(n_samples)[:k]\n centers = X[seeds]\n elif hasattr(init, '__array__'):\n centers = init\n elif callable(init):\n centers = init(X, k, random_state=random_state)\n else:\n raise ValueError(\"the init parameter for the k-means should \"\n \"be 'k-means++' or 'random' or an ndarray, \"\n \"'%s' (type '%s') was passed.\" % (init, type(init)))\n\n if sp.issparse(centers):\n centers = centers.toarray()\n\n if len(centers) != k:\n raise ValueError('The shape of the initial centers (%s) '\n 'does not match the number of clusters %i'\n % (centers.shape, k))\n\n return centers\n\n\nclass KMeans(BaseEstimator, ClusterMixin, TransformerMixin):\n \"\"\"K-Means clustering\n\n Parameters\n ----------\n\n n_clusters : int, optional, default: 8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter : int\n Maximum number of iterations of the k-means algorithm for a\n single run.\n\n n_init : int, optional, default: 10\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization, defaults to 'k-means++':\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. See section\n Notes in k_init for more details.\n\n 'random': choose k observations (rows) at random from data for\n the initial centroids.\n\n If an ndarray is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n precompute_distances : boolean\n Precompute distances (faster but takes more memory).\n\n tol : float, optional default: 1e-4\n Relative tolerance w.r.t. inertia to declare convergence\n\n n_jobs : int\n The number of jobs to use for the computation. This works by breaking\n down the pairwise matrix into n_jobs even slices and computing them in\n parallel.\n\n If -1 all CPUs are used. If 1 is given, no parallel computing code is\n used at all, which is useful for debugging. For n_jobs below -1,\n (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one\n are used.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n Attributes\n ----------\n `cluster_centers_` : array, [n_clusters, n_features]\n Coordinates of cluster centers\n\n `labels_` :\n Labels of each point\n\n `inertia_` : float\n The value of the inertia criterion associated with the chosen\n partition.\n\n Notes\n ------\n The k-means problem is solved using Lloyd's algorithm.\n\n The average complexity is given by O(k n T), were n is the number of\n samples and T is the number of iteration.\n\n The worst case complexity is given by O(n^(k+2/p)) with\n n = n_samples, p = n_features. (D. Arthur and S. 
Vassilvitskii,\n 'How slow is the k-means method?' SoCG2006)\n\n In practice, the k-means algorithm is very fast (one of the fastest\n clustering algorithms available), but it falls in local minima. That's why\n it can be useful to restart it several times.\n\n See also\n --------\n\n MiniBatchKMeans:\n Alternative online implementation that does incremental updates\n of the centers positions using mini-batches.\n For large scale learning (say n_samples > 10k) MiniBatchKMeans is\n probably much faster to than the default batch implementation.\n\n \"\"\"\n\n def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,\n tol=1e-4, precompute_distances=True,\n verbose=0, random_state=None, copy_x=True, n_jobs=1):\n\n if hasattr(init, '__array__'):\n n_clusters = init.shape[0]\n init = np.asarray(init, dtype=np.float64)\n\n self.n_clusters = n_clusters\n self.init = init\n self.max_iter = max_iter\n self.tol = tol\n self.precompute_distances = precompute_distances\n self.n_init = n_init\n self.verbose = verbose\n self.random_state = random_state\n self.copy_x = copy_x\n self.n_jobs = n_jobs\n\n def _check_fit_data(self, X):\n \"\"\"Verify that the number of samples given is larger than k\"\"\"\n X = atleast2d_or_csr(X, dtype=np.float64)\n if X.shape[0] < self.n_clusters:\n raise ValueError(\"n_samples=%d should be >= n_clusters=%d\" % (\n X.shape[0], self.n_clusters))\n return X\n\n def _check_test_data(self, X):\n X = atleast2d_or_csr(X)\n n_samples, n_features = X.shape\n expected_n_features = self.cluster_centers_.shape[1]\n if not n_features == expected_n_features:\n raise ValueError(\"Incorrect number of features. \"\n \"Got %d features, expected %d\" % (\n n_features, expected_n_features))\n if not X.dtype.kind is 'f':\n warnings.warn(\"Got data type %s, converted to float \"\n \"to avoid overflows\" % X.dtype,\n RuntimeWarning, stacklevel=2)\n X = X.astype(np.float)\n\n return X\n\n def _check_fitted(self):\n if not hasattr(self, \"cluster_centers_\"):\n raise AttributeError(\"Model has not been trained yet.\")\n\n def fit(self, X, y=None):\n \"\"\"Compute k-means clustering.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape=(n_samples, n_features)\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = self._check_fit_data(X)\n\n self.cluster_centers_, self.labels_, self.inertia_ = k_means(\n X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init,\n max_iter=self.max_iter, verbose=self.verbose,\n precompute_distances=self.precompute_distances,\n tol=self.tol, random_state=random_state, copy_x=self.copy_x,\n n_jobs=self.n_jobs)\n return self\n\n def fit_predict(self, X):\n \"\"\"Compute cluster centers and predict cluster index for each sample.\n\n Convenience method; equivalent to calling fit(X) followed by\n predict(X).\n \"\"\"\n return self.fit(X).labels_\n\n def fit_transform(self, X, y=None):\n \"\"\"Compute clustering and transform X to cluster-distance space.\n\n Equivalent to fit(X).transform(X), but more efficiently implemented.\n \"\"\"\n # Currently, this just skips a copy of the data if it is not in\n # np.array or CSR format already.\n # XXX This skips _check_test_data, which may change the dtype;\n # we should refactor the input validation.\n X = self._check_fit_data(X)\n return self.fit(X)._transform(X)\n\n def transform(self, X, y=None):\n \"\"\"Transform X to a cluster-distance space\n\n In the new space, each dimension is the distance to the cluster\n centers. 
Note that even if X is sparse, the array returned by\n `transform` will typically be dense.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n New data to transform.\n\n Returns\n -------\n X_new : array, shape [n_samples, k]\n X transformed in the new space.\n \"\"\"\n self._check_fitted()\n X = self._check_test_data(X)\n return self._transform(X)\n\n def _transform(self, X):\n \"\"\"guts of transform method; no input validation\"\"\"\n return euclidean_distances(X, self.cluster_centers_)\n\n def predict(self, X):\n \"\"\"Predict the closest cluster each sample in X belongs to.\n\n In the vector quantization literature, `cluster_centers_` is called\n the code book and each value returned by `predict` is the index of\n the closest code in the code book.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n New data to predict.\n\n Returns\n -------\n Y : array, shape [n_samples,]\n Index of the closest center each sample belongs to.\n \"\"\"\n self._check_fitted()\n X = self._check_test_data(X)\n x_squared_norms = _squared_norms(X)\n return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]\n\n def score(self, X):\n \"\"\"Opposite of the value of X on the K-means objective.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n New data.\n\n Returns\n -------\n score : float\n Opposite of the value of X on the K-means objective.\n \"\"\"\n self._check_fitted()\n X = self._check_test_data(X)\n x_squared_norms = _squared_norms(X)\n return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]\n\n\ndef _mini_batch_step(X, x_squared_norms, centers, counts,\n old_center_buffer, compute_squared_diff,\n distances, random_reassign=False,\n random_state=None, reassignment_ratio=.01,\n verbose=False):\n \"\"\"Incremental update of the centers for the Minibatch K-Means algorithm\n\n Parameters\n ----------\n\n X: array, shape (n_samples, n_features)\n The original data array.\n\n x_squared_norms: array, shape (n_samples,)\n Squared euclidean norm of each data point.\n\n centers: array, shape (k, n_features)\n The cluster centers. This array is MODIFIED IN PLACE\n\n counts: array, shape (k,)\n The vector in which we keep track of the numbers of elements in a\n cluster. This array is MODIFIED IN PLACE\n\n distances: array, dtype float64, shape (n_samples), optional\n If not None, should be a pre-allocated array that will be used to store\n the distances of each sample to it's closest center.\n May not be None when random_reassign is True.\n\n random_state: integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n random_reassign: boolean, optional\n If True, centers with very low counts are\n randomly-reassigned to observations in dense areas.\n\n reassignment_ratio: float, optional\n Control the fraction of the maximum number of counts for a\n center to be reassigned. 
A higher value means that low count\n centers are more easily reassigned, which means that the\n model will take longer to converge, but should converge in a\n better clustering.\n\n verbose: bool, optional\n Controls the verbosity\n\n \"\"\"\n # Perform label assignment to nearest centers\n nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,\n distances=distances)\n if random_reassign and reassignment_ratio > 0:\n random_state = check_random_state(random_state)\n # Reassign clusters that have very low counts\n to_reassign = np.logical_or(\n (counts <= 1), counts <= reassignment_ratio * counts.max())\n number_of_reassignments = to_reassign.sum()\n if number_of_reassignments:\n # Pick new clusters amongst observations with probability\n # proportional to their closeness to their center.\n # Flip the ordering of the distances.\n distances -= distances.max()\n distances *= -1\n rand_vals = random_state.rand(number_of_reassignments)\n rand_vals *= distances.sum()\n new_centers = np.searchsorted(distances.cumsum(),\n rand_vals)\n if verbose:\n n_reassigns = to_reassign.sum()\n if n_reassigns:\n print(\"[MiniBatchKMeans] Reassigning %i cluster centers.\"\n % n_reassigns)\n\n if sp.issparse(X) and not sp.issparse(centers):\n assign_rows_csr(X, new_centers, np.where(to_reassign)[0],\n centers)\n else:\n centers[to_reassign] = X[new_centers]\n\n # implementation for the sparse CSR reprensation completely written in\n # cython\n if sp.issparse(X):\n return inertia, _k_means._mini_batch_update_csr(\n X, x_squared_norms, centers, counts, nearest_center,\n old_center_buffer, compute_squared_diff)\n\n # dense variant in mostly numpy (not as memory efficient though)\n k = centers.shape[0]\n squared_diff = 0.0\n for center_idx in range(k):\n # find points from minibatch that are assigned to this center\n center_mask = nearest_center == center_idx\n count = center_mask.sum()\n\n if count > 0:\n if compute_squared_diff:\n old_center_buffer[:] = centers[center_idx]\n\n # inplace remove previous count scaling\n centers[center_idx] *= counts[center_idx]\n\n # inplace sum with new points members of this cluster\n centers[center_idx] += np.sum(X[center_mask], axis=0)\n\n # update the count statistics for this center\n counts[center_idx] += count\n\n # inplace rescale to compute mean of all points (old and new)\n centers[center_idx] /= counts[center_idx]\n\n # update the squared diff if necessary\n if compute_squared_diff:\n squared_diff += np.sum(\n (centers[center_idx] - old_center_buffer) ** 2)\n\n return inertia, squared_diff\n\n\ndef _mini_batch_convergence(model, iteration_idx, n_iter, tol,\n n_samples, centers_squared_diff, batch_inertia,\n context, verbose=0):\n \"\"\"Helper function to encapsulte the early stopping logic\"\"\"\n # Normalize inertia to be able to compare values when\n # batch_size changes\n batch_inertia /= model.batch_size\n centers_squared_diff /= model.batch_size\n\n # Compute an Exponentially Weighted Average of the squared\n # diff to monitor the convergence while discarding\n # minibatch-local stochastic variability:\n # https://en.wikipedia.org/wiki/Moving_average\n ewa_diff = context.get('ewa_diff')\n ewa_inertia = context.get('ewa_inertia')\n if ewa_diff is None:\n ewa_diff = centers_squared_diff\n ewa_inertia = batch_inertia\n else:\n alpha = float(model.batch_size) * 2.0 / (n_samples + 1)\n alpha = 1.0 if alpha > 1.0 else alpha\n ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha\n ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * 
alpha\n\n # Log progress to be able to monitor convergence\n if verbose:\n progress_msg = (\n 'Minibatch iteration %d/%d:'\n 'mean batch inertia: %f, ewa inertia: %f ' % (\n iteration_idx + 1, n_iter, batch_inertia,\n ewa_inertia))\n print(progress_msg)\n\n # Early stopping based on absolute tolerance on squared change of\n # centers position (using EWA smoothing)\n if tol > 0.0 and ewa_diff < tol:\n if verbose:\n print('Converged (small centers change) at iteration %d/%d'\n % (iteration_idx + 1, n_iter))\n return True\n\n # Early stopping heuristic due to lack of improvement on smoothed inertia\n ewa_inertia_min = context.get('ewa_inertia_min')\n no_improvement = context.get('no_improvement', 0)\n if (ewa_inertia_min is None or ewa_inertia < ewa_inertia_min):\n no_improvement = 0\n ewa_inertia_min = ewa_inertia\n else:\n no_improvement += 1\n\n if (model.max_no_improvement is not None\n and no_improvement >= model.max_no_improvement):\n if verbose:\n print('Converged (lack of improvement in inertia)'\n ' at iteration %d/%d'\n % (iteration_idx + 1, n_iter))\n return True\n\n # update the convergence context to maintain state across successive calls:\n context['ewa_diff'] = ewa_diff\n context['ewa_inertia'] = ewa_inertia\n context['ewa_inertia_min'] = ewa_inertia_min\n context['no_improvement'] = no_improvement\n return False\n\n\nclass MiniBatchKMeans(KMeans):\n \"\"\"Mini-Batch K-Means clustering\n\n Parameters\n ----------\n\n n_clusters : int, optional, default: 8\n The number of clusters to form as well as the number of\n centroids to generate.\n\n max_iter : int, optional\n Maximum number of iterations over the complete dataset before\n stopping independently of any early stopping criterion heuristics.\n\n max_no_improvement : int, optional\n Control early stopping based on the consecutive number of mini\n batches that does not yield an improvement on the smoothed inertia.\n\n To disable convergence detection based on inertia, set\n max_no_improvement to None.\n\n tol : float, optional\n Control early stopping based on the relative center changes as\n measured by a smoothed, variance-normalized of the mean center\n squared position changes. This early stopping heuristics is\n closer to the one used for the batch variant of the algorithms\n but induces a slight computational and memory overhead over the\n inertia heuristic.\n\n To disable convergence detection based on normalized center\n change, set tol to 0.0 (default).\n\n batch_size : int, optional, default: 100\n Size of the mini batches.\n\n init_size : int, optional, default: 3 * batch_size\n Number of samples to randomly sample for speeding up the\n initialization (sometimes at the expense of accuracy): the\n only algorithm is initialized by running a batch KMeans on a\n random subset of the data. This needs to be larger than k.\n\n init : {'k-means++', 'random' or an ndarray}\n Method for initialization, defaults to 'k-means++':\n\n 'k-means++' : selects initial cluster centers for k-mean\n clustering in a smart way to speed up convergence. 
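# A small numeric sketch (illustrative only, with made-up inertia values) of
# the exponentially weighted average used by _mini_batch_convergence above:
# every mini-batch result is blended into the running value with weight
# alpha = batch_size * 2 / (n_samples + 1), capped at 1.
batch_size, n_samples = 100, 10000
alpha = min(1.0, float(batch_size) * 2.0 / (n_samples + 1))

ewa_inertia = None
for batch_inertia in [52.0, 48.5, 47.9, 47.7]:    # pretend per-batch inertias
    if ewa_inertia is None:
        ewa_inertia = batch_inertia                # first batch seeds the average
    else:
        ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
    print("smoothed inertia: %.4f" % ewa_inertia)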
See section\n Notes in k_init for more details.\n\n 'random': choose k observations (rows) at random from data for\n the initial centroids.\n\n\n If an ndarray is passed, it should be of shape (n_clusters, n_features)\n and gives the initial centers.\n\n n_init : int, default=3\n Number of random initializations that are tried.\n In contrast to KMeans, the algorithm is only run once, using the\n best of the ``n_init`` initializations as measured by inertia.\n\n compute_labels : boolean\n Compute label assignment and inertia for the complete dataset\n once the minibatch optimization has converged in fit.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n reassignment_ratio : float, optional\n Control the fraction of the maximum number of counts for a\n center to be reassigned. A higher value means that low count\n centers are more easily reassigned, which means that the\n model will take longer to converge, but should converge in a\n better clustering.\n\n\n Attributes\n ----------\n\n `cluster_centers_` : array, [n_clusters, n_features]\n Coordinates of cluster centers\n\n `labels_` :\n Labels of each point (if compute_labels is set to True).\n\n `inertia_` : float\n The value of the inertia criterion associated with the chosen\n partition (if compute_labels is set to True). The inertia is\n defined as the sum of square distances of samples to their nearest\n neighbor.\n\n Notes\n -----\n See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf\n \"\"\"\n\n def __init__(self, n_clusters=8, init='k-means++', max_iter=100,\n batch_size=100, verbose=0, compute_labels=True,\n random_state=None, tol=0.0, max_no_improvement=10,\n init_size=None, n_init=3, reassignment_ratio=0.01):\n\n super(MiniBatchKMeans, self).__init__(\n n_clusters=n_clusters, init=init, max_iter=max_iter,\n verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)\n\n self.max_no_improvement = max_no_improvement\n self.batch_size = batch_size\n self.compute_labels = compute_labels\n self.init_size = init_size\n self.reassignment_ratio = reassignment_ratio\n\n def fit(self, X, y=None):\n \"\"\"Compute the centroids on X by chunking it into mini-batches.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Coordinates of the data points to cluster\n \"\"\"\n random_state = check_random_state(self.random_state)\n X = check_arrays(X, sparse_format=\"csr\", copy=False,\n check_ccontiguous=True, dtype=np.float64)[0]\n n_samples, n_features = X.shape\n if n_samples < self.n_clusters:\n raise ValueError(\"Number of samples smaller than number \"\n \"of clusters.\")\n\n if hasattr(self.init, '__array__'):\n self.init = np.ascontiguousarray(self.init, dtype=np.float64)\n\n x_squared_norms = _squared_norms(X)\n\n if self.tol > 0.0:\n tol = _tolerance(X, self.tol)\n\n # using tol-based early stopping needs the allocation of a\n # dedicated before which can be expensive for high dim data:\n # hence we allocate it outside of the main loop\n old_center_buffer = np.zeros(n_features, np.double)\n else:\n tol = 0.0\n # no need for the center buffer if tol-based early stopping is\n # disabled\n old_center_buffer = np.zeros(0, np.double)\n\n distances = np.zeros(self.batch_size, dtype=np.float64)\n n_batches = int(np.ceil(float(n_samples) / self.batch_size))\n n_iter = int(self.max_iter * n_batches)\n\n init_size = self.init_size\n if init_size 
is None:\n init_size = 3 * self.batch_size\n if init_size > n_samples:\n init_size = n_samples\n self.init_size_ = init_size\n\n validation_indices = random_state.random_integers(\n 0, n_samples - 1, init_size)\n X_valid = X[validation_indices]\n x_squared_norms_valid = x_squared_norms[validation_indices]\n\n # perform several inits with random sub-sets\n best_inertia = None\n for init_idx in range(self.n_init):\n if self.verbose:\n print(\"Init %d/%d with method: %s\"\n % (init_idx + 1, self.n_init, self.init))\n counts = np.zeros(self.n_clusters, dtype=np.int32)\n\n # TODO: once the `k_means` function works with sparse input we\n # should refactor the following init to use it instead.\n\n # Initialize the centers using only a fraction of the data as we\n # expect n_samples to be very large when using MiniBatchKMeans\n cluster_centers = _init_centroids(\n X, self.n_clusters, self.init,\n random_state=random_state,\n x_squared_norms=x_squared_norms,\n init_size=init_size)\n\n # Compute the label assignment on the init dataset\n batch_inertia, centers_squared_diff = _mini_batch_step(\n X_valid, x_squared_norms[validation_indices],\n cluster_centers, counts, old_center_buffer, False,\n distances=distances, verbose=self.verbose)\n\n # Keep only the best cluster centers across independent inits on\n # the common validation set\n _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,\n cluster_centers)\n if self.verbose:\n print(\"Inertia for init %d/%d: %f\"\n % (init_idx + 1, self.n_init, inertia))\n if best_inertia is None or inertia < best_inertia:\n self.cluster_centers_ = cluster_centers\n self.counts_ = counts\n best_inertia = inertia\n\n # Empty context to be used inplace by the convergence check routine\n convergence_context = {}\n\n # Perform the iterative optimization until the final convergence\n # criterion\n for iteration_idx in range(n_iter):\n # Sample a minibatch from the full dataset\n minibatch_indices = random_state.random_integers(\n 0, n_samples - 1, self.batch_size)\n\n # Perform the actual update step on the minibatch data\n batch_inertia, centers_squared_diff = _mini_batch_step(\n X[minibatch_indices], x_squared_norms[minibatch_indices],\n self.cluster_centers_, self.counts_,\n old_center_buffer, tol > 0.0, distances=distances,\n # Here we randomly choose whether to perform\n # random reassignment: the choice is done as a function\n # of the iteration index, and the minimum number of\n # counts, in order to force this reassignment to happen\n # every once in a while\n random_reassign=((iteration_idx + 1)\n % (10 + self.counts_.min()) == 0),\n random_state=random_state,\n reassignment_ratio=self.reassignment_ratio,\n verbose=self.verbose)\n\n # Monitor convergence and do early stopping if necessary\n if _mini_batch_convergence(\n self, iteration_idx, n_iter, tol, n_samples,\n centers_squared_diff, batch_inertia, convergence_context,\n verbose=self.verbose):\n break\n\n if self.compute_labels:\n if self.verbose:\n print('Computing label assignment and total inertia')\n self.labels_, self.inertia_ = _labels_inertia(\n X, x_squared_norms, self.cluster_centers_)\n\n return self\n\n def partial_fit(self, X, y=None):\n \"\"\"Update k means estimate on a single mini-batch X.\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Coordinates of the data points to cluster.\n \"\"\"\n\n X = check_arrays(X, sparse_format=\"csr\", copy=False)[0]\n n_samples, n_features = X.shape\n if hasattr(self.init, '__array__'):\n self.init = 
np.ascontiguousarray(self.init, dtype=np.float64)\n\n if n_samples == 0:\n return self\n\n x_squared_norms = _squared_norms(X)\n self.random_state_ = check_random_state(self.random_state)\n if (not hasattr(self, 'counts_')\n or not hasattr(self, 'cluster_centers_')):\n # this is the first call partial_fit on this object:\n # initialize the cluster centers\n self.cluster_centers_ = _init_centroids(\n X, self.n_clusters, self.init,\n random_state=self.random_state_,\n x_squared_norms=x_squared_norms, init_size=self.init_size)\n\n self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)\n random_reassign = False\n distances = None\n else:\n # The lower the minimum count is, the more we do random\n # reassignment, however, we don't want to do random\n # reassignment too often, to allow for building up counts\n random_reassign = self.random_state_.randint(\n 10 * (1 + self.counts_.min())) == 0\n distances = np.zeros(self.n_clusters, dtype=np.float64)\n\n _mini_batch_step(X, x_squared_norms, self.cluster_centers_,\n self.counts_, np.zeros(0, np.double), 0,\n random_reassign=random_reassign, distances=distances,\n random_state=self.random_state_,\n reassignment_ratio=self.reassignment_ratio,\n verbose=self.verbose)\n\n if self.compute_labels:\n self.labels_, self.inertia_ = _labels_inertia(\n X, x_squared_norms, self.cluster_centers_)\n\n return self\n", "# Authors: Alexandre Gramfort <[email protected]>\n# Vincent Michel <[email protected]>\n# Gilles Louppe <[email protected]>\n#\n# License: BSD 3 clause\n\n\"\"\"Recursive feature elimination for feature ranking\"\"\"\n\nimport numpy as np\nfrom ..utils import check_arrays, safe_sqr\nfrom ..base import BaseEstimator\nfrom ..base import MetaEstimatorMixin\nfrom ..base import clone\nfrom ..base import is_classifier\nfrom ..cross_validation import check_cv\nfrom .base import SelectorMixin\nfrom ..metrics.scorer import _deprecate_loss_and_score_funcs\n\n\nclass RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):\n \"\"\"Feature ranking with recursive feature elimination.\n\n Given an external estimator that assigns weights to features (e.g., the\n coefficients of a linear model), the goal of recursive feature elimination\n (RFE) is to select features by recursively considering smaller and smaller\n sets of features. First, the estimator is trained on the initial set of\n features and weights are assigned to each one of them. Then, features whose\n absolute weights are the smallest are pruned from the current set features.\n That procedure is recursively repeated on the pruned set until the desired\n number of features to select is eventually reached.\n\n Parameters\n ----------\n estimator : object\n A supervised learning estimator with a `fit` method that updates a\n `coef_` attribute that holds the fitted parameters. Important features\n must correspond to high absolute values in the `coef_` array.\n\n For instance, this is the case for most supervised learning\n algorithms such as Support Vector Classifiers and Generalized\n Linear Models from the `svm` and `linear_model` modules.\n\n n_features_to_select : int or None (default=None)\n The number of features to select. 
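# A minimal out-of-core sketch (not from the original file) built on the
# partial_fit method defined above, assuming the public sklearn.cluster
# MiniBatchKMeans API; np.array_split stands in for data arriving in chunks.
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=3000, centers=5, random_state=0)
mbk = MiniBatchKMeans(n_clusters=5, random_state=0)
for chunk in np.array_split(X, 30):    # 30 successive batches of 100 samples
    mbk.partial_fit(chunk)             # updates centers and counts in place
print(mbk.cluster_centers_.shape)      # (5, 2) for the 2-feature blobs above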
If `None`, half of the features\n are selected.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then `step` corresponds to the (integer)\n number of features to remove at each iteration.\n If within (0.0, 1.0), then `step` corresponds to the percentage\n (rounded down) of features to remove at each iteration.\n\n estimator_params : dict\n Parameters for the external estimator.\n Useful for doing grid searches.\n\n Attributes\n ----------\n `n_features_` : int\n The number of selected features.\n\n `support_` : array of shape [n_features]\n The mask of selected features.\n\n `ranking_` : array of shape [n_features]\n The feature ranking, such that `ranking_[i]` corresponds to the \\\n ranking position of the i-th feature. Selected (i.e., estimated \\\n best) features are assigned rank 1.\n\n `estimator_` : object\n The external estimator fit on the reduced dataset.\n\n Examples\n --------\n The following example shows how to retrieve the 5 right informative\n features in the Friedman #1 dataset.\n\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.feature_selection import RFE\n >>> from sklearn.svm import SVR\n >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)\n >>> estimator = SVR(kernel=\"linear\")\n >>> selector = RFE(estimator, 5, step=1)\n >>> selector = selector.fit(X, y)\n >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE\n array([ True, True, True, True, True,\n False, False, False, False, False], dtype=bool)\n >>> selector.ranking_\n array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])\n\n References\n ----------\n\n .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., \"Gene selection\n for cancer classification using support vector machines\",\n Mach. Learn., 46(1-3), 389--422, 2002.\n \"\"\"\n def __init__(self, estimator, n_features_to_select=None, step=1,\n estimator_params={}, verbose=0):\n self.estimator = estimator\n self.n_features_to_select = n_features_to_select\n self.step = step\n self.estimator_params = estimator_params\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the RFE model and then the underlying estimator on the selected\n features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n The training input samples.\n\n y : array-like, shape = [n_samples]\n The target values.\n \"\"\"\n X, y = check_arrays(X, y, sparse_format=\"csc\")\n # Initialization\n n_features = X.shape[1]\n if self.n_features_to_select is None:\n n_features_to_select = n_features / 2\n else:\n n_features_to_select = self.n_features_to_select\n\n if 0.0 < self.step < 1.0:\n step = int(self.step * n_features)\n else:\n step = int(self.step)\n if step <= 0:\n raise ValueError(\"Step must be >0\")\n\n support_ = np.ones(n_features, dtype=np.bool)\n ranking_ = np.ones(n_features, dtype=np.int)\n # Elimination\n while np.sum(support_) > n_features_to_select:\n # Remaining features\n features = np.arange(n_features)[support_]\n\n # Rank the remaining features\n estimator = clone(self.estimator)\n estimator.set_params(**self.estimator_params)\n if self.verbose > 0:\n print(\"Fitting estimator with %d features.\" % np.sum(support_))\n\n estimator.fit(X[:, features], y)\n\n if estimator.coef_.ndim > 1:\n ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))\n else:\n ranks = np.argsort(safe_sqr(estimator.coef_))\n\n # for sparse case ranks is matrix\n ranks = np.ravel(ranks)\n\n # Eliminate the worse features\n threshold = min(step, np.sum(support_) - n_features_to_select)\n 
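# Illustrative sketch (hypothetical helper, not part of RFE) of the elimination
# schedule produced by the loop above: `step` features are dropped per round,
# capped in the final round so the count never falls below n_features_to_select.
def elimination_schedule(n_features, n_features_to_select, step):
    remaining = n_features
    while remaining > n_features_to_select:
        drop = min(step, remaining - n_features_to_select)
        remaining -= drop
        yield remaining

print(list(elimination_schedule(10, 3, step=3)))   # [7, 4, 3]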
support_[features[ranks][:threshold]] = False\n ranking_[np.logical_not(support_)] += 1\n\n # Set final attributes\n self.estimator_ = clone(self.estimator)\n self.estimator_.set_params(**self.estimator_params)\n self.estimator_.fit(X[:, support_], y)\n self.n_features_ = support_.sum()\n self.support_ = support_\n self.ranking_ = ranking_\n\n return self\n\n def predict(self, X):\n \"\"\"Reduce X to the selected features and then predict using the\n underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n Returns\n -------\n y : array of shape [n_samples]\n The predicted target values.\n \"\"\"\n return self.estimator_.predict(self.transform(X))\n\n def score(self, X, y):\n \"\"\"Reduce X to the selected features and then return the score of the\n underlying estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n \"\"\"\n return self.estimator_.score(self.transform(X), y)\n\n def _get_support_mask(self):\n return self.support_\n\n def decision_function(self, X):\n return self.estimator_.decision_function(self.transform(X))\n\n def predict_proba(self, X):\n return self.estimator_.predict_proba(self.transform(X))\n\n\nclass RFECV(RFE, MetaEstimatorMixin):\n \"\"\"Feature ranking with recursive feature elimination and cross-validated\n selection of the best number of features.\n\n Parameters\n ----------\n estimator : object\n A supervised learning estimator with a `fit` method that updates a\n `coef_` attribute that holds the fitted parameters. Important features\n must correspond to high absolute values in the `coef_` array.\n\n For instance, this is the case for most supervised learning\n algorithms such as Support Vector Classifiers and Generalized\n Linear Models from the `svm` and `linear_model` modules.\n\n step : int or float, optional (default=1)\n If greater than or equal to 1, then `step` corresponds to the (integer)\n number of features to remove at each iteration.\n If within (0.0, 1.0), then `step` corresponds to the percentage\n (rounded down) of features to remove at each iteration.\n\n cv : int or cross-validation generator, optional (default=None)\n If int, it is the number of folds.\n If None, 3-fold cross-validation is performed by default.\n Specific cross-validation objects can also be passed, see\n `sklearn.cross_validation module` for details.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n estimator_params : dict\n Parameters for the external estimator.\n Useful for doing grid searches.\n\n verbose : int, default=0\n Controls verbosity of output.\n\n Attributes\n ----------\n `n_features_` : int\n The number of selected features with cross-validation.\n `support_` : array of shape [n_features]\n The mask of selected features.\n\n `ranking_` : array of shape [n_features]\n The feature ranking, such that `ranking_[i]`\n corresponds to the ranking\n position of the i-th feature.\n Selected (i.e., estimated best)\n features are assigned rank 1.\n\n `grid_scores_` : array of shape [n_subsets_of_features]\n The cross-validation scores such that\n `grid_scores_[i]` corresponds to\n the CV score of the i-th subset of features.\n\n `estimator_` : object\n The external estimator fit on the reduced dataset.\n\n Examples\n --------\n The following example 
shows how to retrieve the a-priori not known 5\n informative features in the Friedman #1 dataset.\n\n >>> from sklearn.datasets import make_friedman1\n >>> from sklearn.feature_selection import RFECV\n >>> from sklearn.svm import SVR\n >>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)\n >>> estimator = SVR(kernel=\"linear\")\n >>> selector = RFECV(estimator, step=1, cv=5)\n >>> selector = selector.fit(X, y)\n >>> selector.support_ # doctest: +NORMALIZE_WHITESPACE\n array([ True, True, True, True, True,\n False, False, False, False, False], dtype=bool)\n >>> selector.ranking_\n array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])\n\n References\n ----------\n\n .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., \"Gene selection\n for cancer classification using support vector machines\",\n Mach. Learn., 46(1-3), 389--422, 2002.\n \"\"\"\n def __init__(self, estimator, step=1, cv=None, scoring=None,\n loss_func=None, estimator_params={}, verbose=0):\n self.estimator = estimator\n self.step = step\n self.cv = cv\n self.scoring = scoring\n self.loss_func = loss_func\n self.estimator_params = estimator_params\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the RFE model and automatically tune the number of selected\n features.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the total number of features.\n\n y : array-like, shape = [n_samples]\n Target values (integers for classification, real numbers for\n regression).\n \"\"\"\n X, y = check_arrays(X, y, sparse_format=\"csr\")\n # Initialization\n rfe = RFE(estimator=self.estimator, n_features_to_select=1,\n step=self.step, estimator_params=self.estimator_params,\n verbose=self.verbose - 1)\n\n cv = check_cv(self.cv, X, y, is_classifier(self.estimator))\n scores = np.zeros(X.shape[1])\n\n # Cross-validation\n for n, (train, test) in enumerate(cv):\n X_train, X_test = X[train], X[test]\n y_train, y_test = y[train], y[test]\n\n # Compute a full ranking of the features\n ranking_ = rfe.fit(X_train, y_train).ranking_\n # Score each subset of features\n for k in range(0, max(ranking_)):\n mask = np.where(ranking_ <= k + 1)[0]\n estimator = clone(self.estimator)\n estimator.fit(X_train[:, mask], y_train)\n\n if self.loss_func is None and self.scoring is None:\n score = estimator.score(X_test[:, mask], y_test)\n else:\n scorer = _deprecate_loss_and_score_funcs(\n loss_func=self.loss_func,\n scoring=self.scoring\n )\n score = scorer(estimator, X_test[:, mask], y_test)\n\n if self.verbose > 0:\n print(\"Finished fold with %d / %d feature ranks, score=%f\"\n % (k, max(ranking_), score))\n scores[k] += score\n\n # Pick the best number of features on average\n k = np.argmax(scores)\n best_score = scores[k]\n\n # Re-execute an elimination with best_k over the whole set\n rfe = RFE(estimator=self.estimator,\n n_features_to_select=k+1,\n step=self.step, estimator_params=self.estimator_params)\n\n rfe.fit(X, y)\n\n # Set final attributes\n self.support_ = rfe.support_\n self.n_features_ = rfe.n_features_\n self.ranking_ = rfe.ranking_\n self.estimator_ = clone(self.estimator)\n self.estimator_.set_params(**self.estimator_params)\n self.estimator_.fit(self.transform(X), y)\n\n self.grid_scores_ = scores / n\n return self\n", "# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Andreas Mueller <[email protected]>\n# License: BSD 
3 clause\n\nimport numpy as np\n\nfrom ..base import BaseEstimator, TransformerMixin\n\nfrom ..utils.fixes import unique\nfrom ..utils import deprecated, column_or_1d\n\nfrom ..utils.multiclass import unique_labels\nfrom ..utils.multiclass import type_of_target\n\nfrom ..externals import six\n\nzip = six.moves.zip\nmap = six.moves.map\n\n__all__ = [\n 'label_binarize',\n 'LabelBinarizer',\n 'LabelEncoder',\n]\n\n\nclass LabelEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode labels with value between 0 and n_classes-1.\n\n Attributes\n ----------\n `classes_` : array of shape (n_class,)\n Holds the label for each class.\n\n Examples\n --------\n `LabelEncoder` can be used to normalize labels.\n\n >>> from sklearn import preprocessing\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([1, 2, 2, 6])\n LabelEncoder()\n >>> le.classes_\n array([1, 2, 6])\n >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS\n array([0, 0, 1, 2]...)\n >>> le.inverse_transform([0, 0, 1, 2])\n array([1, 1, 2, 6])\n\n It can also be used to transform non-numerical labels (as long as they are\n hashable and comparable) to numerical labels.\n\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\"])\n LabelEncoder()\n >>> list(le.classes_)\n ['amsterdam', 'paris', 'tokyo']\n >>> le.transform([\"tokyo\", \"tokyo\", \"paris\"]) #doctest: +ELLIPSIS\n array([2, 2, 1]...)\n >>> list(le.inverse_transform([2, 2, 1]))\n ['tokyo', 'tokyo', 'paris']\n\n \"\"\"\n\n def _check_fitted(self):\n if not hasattr(self, \"classes_\"):\n raise ValueError(\"LabelEncoder was not fitted yet.\")\n\n def fit(self, y):\n \"\"\"Fit label encoder\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_ = np.unique(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label encoder and return encoded labels\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_, y = unique(y, return_inverse=True)\n return y\n\n def transform(self, y):\n \"\"\"Transform labels to normalized encoding.\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n self._check_fitted()\n\n classes = np.unique(y)\n if len(np.intersect1d(classes, self.classes_)) < len(classes):\n diff = np.setdiff1d(classes, self.classes_)\n raise ValueError(\"y contains new labels: %s\" % str(diff))\n\n return np.searchsorted(self.classes_, y)\n\n def inverse_transform(self, y):\n \"\"\"Transform labels back to original encoding.\n\n Parameters\n ----------\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : numpy array of shape [n_samples]\n \"\"\"\n self._check_fitted()\n\n y = np.asarray(y)\n return self.classes_[y]\n\n\nclass LabelBinarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n At learning time, this simply consists in learning one regressor\n or binary classifier per class. 
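# A short sketch (not part of the original module) of the LabelEncoder
# behaviour implemented above: fit_transform is the one-pass equivalent of
# fit followed by transform, and transform rejects labels unseen during fit.
import numpy as np
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
codes = le.fit_transform(["paris", "tokyo", "paris", "amsterdam"])
print(list(le.classes_))    # ['amsterdam', 'paris', 'tokyo']
print(list(codes))          # [1, 2, 1, 0]
assert np.array_equal(codes, le.transform(["paris", "tokyo", "paris", "amsterdam"]))

try:
    le.transform(["berlin"])            # never seen during fit
except ValueError as err:
    print(err)                          # reports the offending new label(s)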
In doing so, one needs to convert\n multi-class labels to binary labels (belong or does not belong\n to the class). LabelBinarizer makes this process easy with the\n transform method.\n\n At prediction time, one assigns the class for which the corresponding\n model gave the greatest confidence. LabelBinarizer makes this easy\n with the inverse_transform method.\n\n Parameters\n ----------\n\n neg_label : int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label : int (default: 1)\n Value with which positive labels must be encoded.\n\n Attributes\n ----------\n `classes_` : array of shape [n_class]\n Holds the label for each class.\n\n `multilabel_` : boolean\n True if the transformer was fitted on a multilabel rather than a\n multiclass set of labels.\n\n Examples\n --------\n >>> from sklearn import preprocessing\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit([1, 2, 6, 4, 2])\n LabelBinarizer(neg_label=0, pos_label=1)\n >>> lb.classes_\n array([1, 2, 4, 6])\n >>> lb.multilabel_\n False\n >>> lb.transform([1, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n >>> lb.fit_transform([(1, 2), (3,)])\n array([[1, 1, 0],\n [0, 0, 1]])\n >>> lb.classes_\n array([1, 2, 3])\n >>> lb.multilabel_\n True\n\n See also\n --------\n label_binarize : function to perform the transform operation of\n LabelBinarizer with fixed classes.\n \"\"\"\n\n def __init__(self, neg_label=0, pos_label=1):\n if neg_label >= pos_label:\n raise ValueError(\"neg_label must be strictly less than pos_label.\")\n\n self.neg_label = neg_label\n self.pos_label = pos_label\n\n @property\n @deprecated(\"Attribute `multilabel` was renamed to `multilabel_` in \"\n \"0.14 and will be removed in 0.16\")\n def multilabel(self):\n return self.multilabel_\n\n def _check_fitted(self):\n if not hasattr(self, \"classes_\"):\n raise ValueError(\"LabelBinarizer was not fitted yet.\")\n\n def fit(self, y):\n \"\"\"Fit label binarizer\n\n Parameters\n ----------\n y : numpy array of shape (n_samples,) or sequence of sequences\n Target values. In the multilabel case the nested sequences can\n have variable lengths.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n y_type = type_of_target(y)\n self.multilabel_ = y_type.startswith('multilabel')\n if self.multilabel_:\n self.indicator_matrix_ = y_type == 'multilabel-indicator'\n\n self.classes_ = unique_labels(y)\n\n return self\n\n def transform(self, y):\n \"\"\"Transform multi-class labels to binary labels\n\n The output of transform is sometimes referred to by some authors as the\n 1-of-K coding scheme.\n\n Parameters\n ----------\n y : numpy array of shape [n_samples] or sequence of sequences\n Target values. 
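# Illustrative sketch of the neg_label / pos_label parameters documented above,
# assuming the public sklearn.preprocessing.LabelBinarizer API; classes_ is
# inferred as [2, 5, 9], so each row carries pos_label in exactly one column.
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer(neg_label=-1, pos_label=1)
print(lb.fit_transform([2, 5, 5, 9]))
# expected:
# [[ 1 -1 -1]
#  [-1  1 -1]
#  [-1  1 -1]
#  [-1 -1  1]]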
In the multilabel case the nested sequences can\n have variable lengths.\n\n Returns\n -------\n Y : numpy array of shape [n_samples, n_classes]\n \"\"\"\n self._check_fitted()\n\n y_is_multilabel = type_of_target(y).startswith('multilabel')\n\n if y_is_multilabel and not self.multilabel_:\n raise ValueError(\"The object was not fitted with multilabel\"\n \" input.\")\n\n return label_binarize(y, self.classes_,\n multilabel=self.multilabel_,\n pos_label=self.pos_label,\n neg_label=self.neg_label)\n\n def inverse_transform(self, Y, threshold=None):\n \"\"\"Transform binary labels back to multi-class labels\n\n Parameters\n ----------\n Y : numpy array of shape [n_samples, n_classes]\n Target values.\n\n threshold : float or None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when:\n - Y contains the output of decision_function (classifier)\n Use 0.5 when:\n - Y contains the output of predict_proba\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : numpy array of shape [n_samples] or sequence of sequences\n Target values. In the multilabel case the nested sequences can\n have variable lengths.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), inverse_transform chooses the class with the\n greatest value. Typically, this allows to use the output of a\n linear model's decision_function method directly as the input\n of inverse_transform.\n \"\"\"\n self._check_fitted()\n\n if threshold is None:\n half = (self.pos_label - self.neg_label) / 2.0\n threshold = self.neg_label + half\n\n if self.multilabel_:\n Y = np.array(Y > threshold, dtype=int)\n # Return the predictions in the same format as in fit\n if self.indicator_matrix_:\n # Label indicator matrix format\n return Y\n else:\n # Lists of tuples format\n return [tuple(self.classes_[np.flatnonzero(Y[i])])\n for i in range(Y.shape[0])]\n\n if len(Y.shape) == 1 or Y.shape[1] == 1:\n y = np.array(Y.ravel() > threshold, dtype=int)\n\n else:\n y = Y.argmax(axis=1)\n\n return self.classes_[y]\n\n\ndef label_binarize(y, classes, multilabel=False, neg_label=0, pos_label=1):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels to encode.\n\n classes : array of shape [n_classes]\n Uniquely holds the label for each class.\n\n multilabel : boolean\n Set to true if y is encoding a multilabel tasks (with a variable\n number of label assignements per sample) rather than a multiclass task\n where one sample has one and only one label assigned.\n\n neg_label: int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label: int (default: 1)\n Value with which positive labels must be encoded.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n >>> label_binarize([(1, 2), (6,), ()], multilabel=True,\n ... 
classes=[1, 6, 4, 2])\n array([[1, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 0, 0, 0]])\n\n See also\n --------\n label_binarize : function to perform the transform operation of\n LabelBinarizer with fixed classes.\n \"\"\"\n y_type = type_of_target(y)\n\n if multilabel or len(classes) > 2:\n if y_type == 'multilabel-indicator':\n # nothing to do as y is already a label indicator matrix\n return y\n\n Y = np.zeros((len(y), len(classes)), dtype=np.int)\n else:\n Y = np.zeros((len(y), 1), dtype=np.int)\n\n Y += neg_label\n\n y_is_multilabel = y_type.startswith('multilabel')\n\n if multilabel:\n if not y_is_multilabel:\n raise ValueError(\"y should be a list of label lists/tuples,\"\n \"got %r\" % (y,))\n\n # inverse map: label => column index\n imap = dict((v, k) for k, v in enumerate(classes))\n\n for i, label_tuple in enumerate(y):\n for label in label_tuple:\n Y[i, imap[label]] = pos_label\n\n return Y\n\n else:\n y = column_or_1d(y)\n\n if len(classes) == 2:\n Y[y == classes[1], 0] = pos_label\n return Y\n\n elif len(classes) >= 2:\n for i, k in enumerate(classes):\n Y[y == k, i] = pos_label\n return Y\n\n else:\n # Only one class, returns a matrix with all negative labels.\n return Y\n", "\"\"\"numerictypes: Define the numeric type objects\n\nThis module is designed so 'from numerictypes import *' is safe.\nExported symbols include:\n\n Dictionary with all registered number types (including aliases):\n typeDict\n\n Numeric type objects:\n Bool\n Int8 Int16 Int32 Int64\n UInt8 UInt16 UInt32 UInt64\n Float32 Double64\n Complex32 Complex64\n\n Numeric type classes:\n NumericType\n BooleanType\n SignedType\n UnsignedType\n IntegralType\n SignedIntegralType\n UnsignedIntegralType\n FloatingType\n ComplexType\n\n$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $\n\"\"\"\n\n__all__ = ['NumericType','HasUInt64','typeDict','IsType',\n 'BooleanType', 'SignedType', 'UnsignedType', 'IntegralType',\n 'SignedIntegralType', 'UnsignedIntegralType', 'FloatingType',\n 'ComplexType', 'AnyType', 'ObjectType', 'Any', 'Object',\n 'Bool', 'Int8', 'Int16', 'Int32', 'Int64', 'Float32',\n 'Float64', 'UInt8', 'UInt16', 'UInt32', 'UInt64',\n 'Complex32', 'Complex64', 'Byte', 'Short', 'Int','Long',\n 'Float', 'Complex', 'genericTypeRank', 'pythonTypeRank',\n 'pythonTypeMap', 'scalarTypeMap', 'genericCoercions',\n 'typecodes', 'genericPromotionExclusions','MaximumType',\n 'getType','scalarTypes', 'typefrom']\n\nMAX_ALIGN = 8\nMAX_INT_SIZE = 8\n\nimport numpy\nLP64 = numpy.intp(0).itemsize == 8\n\nHasUInt64 = 1\ntry:\n numpy.int64(0)\nexcept:\n HasUInt64 = 0\n\n#from typeconv import typeConverters as _typeConverters\n#import numinclude\n#from _numerictype import _numerictype, typeDict\n\n# Enumeration of numarray type codes\ntypeDict = {}\n\n_tAny = 0\n_tBool = 1\n_tInt8 = 2\n_tUInt8 = 3\n_tInt16 = 4\n_tUInt16 = 5\n_tInt32 = 6\n_tUInt32 = 7\n_tInt64 = 8\n_tUInt64 = 9\n_tFloat32 = 10\n_tFloat64 = 11\n_tComplex32 = 12\n_tComplex64 = 13\n_tObject = 14\n\ndef IsType(rep):\n \"\"\"Determines whether the given object or string, 'rep', represents\n a numarray type.\"\"\"\n return isinstance(rep, NumericType) or rep in typeDict\n\ndef _register(name, type, force=0):\n \"\"\"Register the type object. 
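# A tiny usage sketch (an aside, not part of the module): per the module
# docstring it is meant to be importable as `numerictypes`; IsType accepts
# either a NumericType instance or any name registered in typeDict (the Int32
# object referenced here is defined and registered further below in this file).
from numerictypes import IsType, Int32, typeDict

print(IsType("Int32"))        # True -- looked up by its registered name
print(IsType(Int32))          # True -- a NumericType instance
print(IsType("no-such"))      # False
print(sorted(typeDict)[:5])   # a peek at the registered names and aliases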
Raise an exception if it is already registered\n unless force is true.\n \"\"\"\n if name in typeDict and not force:\n raise ValueError(\"Type %s has already been registered\" % name)\n typeDict[name] = type\n return type\n\n\nclass NumericType(object):\n \"\"\"Numeric type class\n\n Used both as a type identification and the repository of\n characteristics and conversion functions.\n \"\"\"\n def __new__(type, name, bytes, default, typeno):\n \"\"\"__new__() implements a 'quasi-singleton pattern because attempts\n to create duplicate types return the first created instance of that\n particular type parameterization, i.e. the second time you try to\n create \"Int32\", you get the original Int32, not a new one.\n \"\"\"\n if name in typeDict:\n self = typeDict[name]\n if self.bytes != bytes or self.default != default or \\\n self.typeno != typeno:\n raise ValueError(\"Redeclaration of existing NumericType \"\\\n \"with different parameters.\")\n return self\n else:\n self = object.__new__(type)\n self.name = \"no name\"\n self.bytes = None\n self.default = None\n self.typeno = -1\n return self\n\n def __init__(self, name, bytes, default, typeno):\n if not isinstance(name, str):\n raise TypeError(\"name must be a string\")\n self.name = name\n self.bytes = bytes\n self.default = default\n self.typeno = typeno\n self._conv = None\n _register(self.name, self)\n\n def __getnewargs__(self):\n \"\"\"support the pickling protocol.\"\"\"\n return (self.name, self.bytes, self.default, self.typeno)\n\n def __getstate__(self):\n \"\"\"support pickling protocol... no __setstate__ required.\"\"\"\n False\n\nclass BooleanType(NumericType):\n pass\n\nclass SignedType:\n \"\"\"Marker class used for signed type check\"\"\"\n pass\n\nclass UnsignedType:\n \"\"\"Marker class used for unsigned type check\"\"\"\n pass\n\nclass IntegralType(NumericType):\n pass\n\nclass SignedIntegralType(IntegralType, SignedType):\n pass\n\nclass UnsignedIntegralType(IntegralType, UnsignedType):\n pass\n\nclass FloatingType(NumericType):\n pass\n\nclass ComplexType(NumericType):\n pass\n\nclass AnyType(NumericType):\n pass\n\nclass ObjectType(NumericType):\n pass\n\n# C-API Type Any\n\nAny = AnyType(\"Any\", None, None, _tAny)\n\nObject = ObjectType(\"Object\", None, None, _tObject)\n\n# Numeric Types:\n\nBool = BooleanType(\"Bool\", 1, 0, _tBool)\nInt8 = SignedIntegralType( \"Int8\", 1, 0, _tInt8)\nInt16 = SignedIntegralType(\"Int16\", 2, 0, _tInt16)\nInt32 = SignedIntegralType(\"Int32\", 4, 0, _tInt32)\nInt64 = SignedIntegralType(\"Int64\", 8, 0, _tInt64)\n\nFloat32 = FloatingType(\"Float32\", 4, 0.0, _tFloat32)\nFloat64 = FloatingType(\"Float64\", 8, 0.0, _tFloat64)\n\nUInt8 = UnsignedIntegralType( \"UInt8\", 1, 0, _tUInt8)\nUInt16 = UnsignedIntegralType(\"UInt16\", 2, 0, _tUInt16)\nUInt32 = UnsignedIntegralType(\"UInt32\", 4, 0, _tUInt32)\nUInt64 = UnsignedIntegralType(\"UInt64\", 8, 0, _tUInt64)\n\nComplex32 = ComplexType(\"Complex32\", 8, complex(0.0), _tComplex32)\nComplex64 = ComplexType(\"Complex64\", 16, complex(0.0), _tComplex64)\n\nObject.dtype = 'O'\nBool.dtype = '?'\nInt8.dtype = 'i1'\nInt16.dtype = 'i2'\nInt32.dtype = 'i4'\nInt64.dtype = 'i8'\n\nUInt8.dtype = 'u1'\nUInt16.dtype = 'u2'\nUInt32.dtype = 'u4'\nUInt64.dtype = 'u8'\n\nFloat32.dtype = 'f4'\nFloat64.dtype = 'f8'\n\nComplex32.dtype = 'c8'\nComplex64.dtype = 'c16'\n\n# Aliases\n\nByte = _register(\"Byte\", Int8)\nShort = _register(\"Short\", Int16)\nInt = _register(\"Int\", Int32)\nif LP64:\n Long = _register(\"Long\", Int64)\n if HasUInt64:\n 
_register(\"ULong\", UInt64)\n MaybeLong = _register(\"MaybeLong\", Int64)\n __all__.append('MaybeLong')\nelse:\n Long = _register(\"Long\", Int32)\n _register(\"ULong\", UInt32)\n MaybeLong = _register(\"MaybeLong\", Int32)\n __all__.append('MaybeLong')\n\n\n_register(\"UByte\", UInt8)\n_register(\"UShort\", UInt16)\n_register(\"UInt\", UInt32)\nFloat = _register(\"Float\", Float64)\nComplex = _register(\"Complex\", Complex64)\n\n# short forms\n\n_register(\"b1\", Bool)\n_register(\"u1\", UInt8)\n_register(\"u2\", UInt16)\n_register(\"u4\", UInt32)\n_register(\"i1\", Int8)\n_register(\"i2\", Int16)\n_register(\"i4\", Int32)\n\n_register(\"i8\", Int64)\nif HasUInt64:\n _register(\"u8\", UInt64)\n\n_register(\"f4\", Float32)\n_register(\"f8\", Float64)\n_register(\"c8\", Complex32)\n_register(\"c16\", Complex64)\n\n# NumPy forms\n\n_register(\"1\", Int8)\n_register(\"B\", Bool)\n_register(\"c\", Int8)\n_register(\"b\", UInt8)\n_register(\"s\", Int16)\n_register(\"w\", UInt16)\n_register(\"i\", Int32)\n_register(\"N\", Int64)\n_register(\"u\", UInt32)\n_register(\"U\", UInt64)\n\nif LP64:\n _register(\"l\", Int64)\nelse:\n _register(\"l\", Int32)\n\n_register(\"d\", Float64)\n_register(\"f\", Float32)\n_register(\"D\", Complex64)\n_register(\"F\", Complex32)\n\n# scipy.base forms\n\ndef _scipy_alias(scipy_type, numarray_type):\n _register(scipy_type, eval(numarray_type))\n globals()[scipy_type] = globals()[numarray_type]\n\n_scipy_alias(\"bool_\", \"Bool\")\n_scipy_alias(\"bool8\", \"Bool\")\n_scipy_alias(\"int8\", \"Int8\")\n_scipy_alias(\"uint8\", \"UInt8\")\n_scipy_alias(\"int16\", \"Int16\")\n_scipy_alias(\"uint16\", \"UInt16\")\n_scipy_alias(\"int32\", \"Int32\")\n_scipy_alias(\"uint32\", \"UInt32\")\n_scipy_alias(\"int64\", \"Int64\")\n_scipy_alias(\"uint64\", \"UInt64\")\n\n_scipy_alias(\"float64\", \"Float64\")\n_scipy_alias(\"float32\", \"Float32\")\n_scipy_alias(\"complex128\", \"Complex64\")\n_scipy_alias(\"complex64\", \"Complex32\")\n\n# The rest is used by numeric modules to determine conversions\n\n# Ranking of types from lowest to highest (sorta)\nif not HasUInt64:\n genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',\n 'Int32', 'UInt32', 'Int64',\n 'Float32','Float64', 'Complex32', 'Complex64', 'Object']\nelse:\n genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',\n 'Int32', 'UInt32', 'Int64', 'UInt64',\n 'Float32','Float64', 'Complex32', 'Complex64', 'Object']\n\npythonTypeRank = [ bool, int, long, float, complex ]\n\n# The next line is not platform independent XXX Needs to be generalized\nif not LP64:\n pythonTypeMap = {\n int:(\"Int32\",\"int\"),\n long:(\"Int64\",\"int\"),\n float:(\"Float64\",\"float\"),\n complex:(\"Complex64\",\"complex\")}\n\n scalarTypeMap = {\n int:\"Int32\",\n long:\"Int64\",\n float:\"Float64\",\n complex:\"Complex64\"}\nelse:\n pythonTypeMap = {\n int:(\"Int64\",\"int\"),\n long:(\"Int64\",\"int\"),\n float:(\"Float64\",\"float\"),\n complex:(\"Complex64\",\"complex\")}\n\n scalarTypeMap = {\n int:\"Int64\",\n long:\"Int64\",\n float:\"Float64\",\n complex:\"Complex64\"}\n\npythonTypeMap.update({bool:(\"Bool\",\"bool\") })\nscalarTypeMap.update({bool:\"Bool\"})\n\n# Generate coercion matrix\n\ndef _initGenericCoercions():\n global genericCoercions\n genericCoercions = {}\n\n # vector with ...\n for ntype1 in genericTypeRank:\n nt1 = typeDict[ntype1]\n rank1 = genericTypeRank.index(ntype1)\n ntypesize1, inttype1, signedtype1 = nt1.bytes, \\\n isinstance(nt1, IntegralType), isinstance(nt1, SignedIntegralType)\n for ntype2 in 
genericTypeRank:\n # vector\n nt2 = typeDict[ntype2]\n ntypesize2, inttype2, signedtype2 = nt2.bytes, \\\n isinstance(nt2, IntegralType), isinstance(nt2, SignedIntegralType)\n rank2 = genericTypeRank.index(ntype2)\n if (signedtype1 != signedtype2) and inttype1 and inttype2:\n # mixing of signed and unsigned ints is a special case\n # If unsigned same size or larger, final size needs to be bigger\n # if possible\n if signedtype1:\n if ntypesize2 >= ntypesize1:\n size = min(2*ntypesize2, MAX_INT_SIZE)\n else:\n size = ntypesize1\n else:\n if ntypesize1 >= ntypesize2:\n size = min(2*ntypesize1, MAX_INT_SIZE)\n else:\n size = ntypesize2\n outtype = \"Int\"+str(8*size)\n else:\n if rank1 >= rank2:\n outtype = ntype1\n else:\n outtype = ntype2\n genericCoercions[(ntype1, ntype2)] = outtype\n\n for ntype2 in pythonTypeRank:\n # scalar\n mapto, kind = pythonTypeMap[ntype2]\n if ((inttype1 and kind==\"int\") or (not inttype1 and kind==\"float\")):\n # both are of the same \"kind\" thus vector type dominates\n outtype = ntype1\n else:\n rank2 = genericTypeRank.index(mapto)\n if rank1 >= rank2:\n outtype = ntype1\n else:\n outtype = mapto\n genericCoercions[(ntype1, ntype2)] = outtype\n genericCoercions[(ntype2, ntype1)] = outtype\n\n # scalar-scalar\n for ntype1 in pythonTypeRank:\n maptype1 = scalarTypeMap[ntype1]\n genericCoercions[(ntype1,)] = maptype1\n for ntype2 in pythonTypeRank:\n maptype2 = scalarTypeMap[ntype2]\n genericCoercions[(ntype1, ntype2)] = genericCoercions[(maptype1, maptype2)]\n\n # Special cases more easily dealt with outside of the loop\n genericCoercions[(\"Complex32\", \"Float64\")] = \"Complex64\"\n genericCoercions[(\"Float64\", \"Complex32\")] = \"Complex64\"\n genericCoercions[(\"Complex32\", \"Int64\")] = \"Complex64\"\n genericCoercions[(\"Int64\", \"Complex32\")] = \"Complex64\"\n genericCoercions[(\"Complex32\", \"UInt64\")] = \"Complex64\"\n genericCoercions[(\"UInt64\", \"Complex32\")] = \"Complex64\"\n\n genericCoercions[(\"Int64\",\"Float32\")] = \"Float64\"\n genericCoercions[(\"Float32\", \"Int64\")] = \"Float64\"\n genericCoercions[(\"UInt64\",\"Float32\")] = \"Float64\"\n genericCoercions[(\"Float32\", \"UInt64\")] = \"Float64\"\n\n genericCoercions[(float, \"Bool\")] = \"Float64\"\n genericCoercions[(\"Bool\", float)] = \"Float64\"\n\n genericCoercions[(float,float,float)] = \"Float64\" # for scipy.special\n genericCoercions[(int,int,float)] = \"Float64\" # for scipy.special\n\n_initGenericCoercions()\n\n# If complex is subclassed, the following may not be necessary\ngenericPromotionExclusions = {\n 'Bool': (),\n 'Int8': (),\n 'Int16': (),\n 'Int32': ('Float32','Complex32'),\n 'UInt8': (),\n 'UInt16': (),\n 'UInt32': ('Float32','Complex32'),\n 'Int64' : ('Float32','Complex32'),\n 'UInt64' : ('Float32','Complex32'),\n 'Float32': (),\n 'Float64': ('Complex32',),\n 'Complex32':(),\n 'Complex64':()\n} # e.g., don't allow promotion from Float64 to Complex32 or Int64 to Float32\n\n# Numeric typecodes\ntypecodes = {'Integer': '1silN',\n 'UnsignedInteger': 'bBwuU',\n 'Float': 'fd',\n 'Character': 'c',\n 'Complex': 'FD' }\n\nif HasUInt64:\n _MaximumType = {\n Bool : UInt64,\n\n Int8 : Int64,\n Int16 : Int64,\n Int32 : Int64,\n Int64 : Int64,\n\n UInt8 : UInt64,\n UInt16 : UInt64,\n UInt32 : UInt64,\n UInt8 : UInt64,\n\n Float32 : Float64,\n Float64 : Float64,\n\n Complex32 : Complex64,\n Complex64 : Complex64\n }\nelse:\n _MaximumType = {\n Bool : Int64,\n\n Int8 : Int64,\n Int16 : Int64,\n Int32 : Int64,\n Int64 : Int64,\n\n UInt8 : Int64,\n UInt16 : Int64,\n UInt32 
: Int64,\n UInt8 : Int64,\n\n Float32 : Float64,\n Float64 : Float64,\n\n Complex32 : Complex64,\n Complex64 : Complex64\n }\n\ndef MaximumType(t):\n \"\"\"returns the type of highest precision of the same general kind as 't'\"\"\"\n return _MaximumType[t]\n\n\ndef getType(type):\n \"\"\"Return the numeric type object for type\n\n type may be the name of a type object or the actual object\n \"\"\"\n if isinstance(type, NumericType):\n return type\n try:\n return typeDict[type]\n except KeyError:\n raise TypeError(\"Not a numeric type\")\n\nscalarTypes = (bool,int,long,float,complex)\n\n_scipy_dtypechar = {\n Int8 : 'b',\n UInt8 : 'B',\n Int16 : 'h',\n UInt16 : 'H',\n Int32 : 'i',\n UInt32 : 'I',\n Int64 : 'q',\n UInt64 : 'Q',\n Float32 : 'f',\n Float64 : 'd',\n Complex32 : 'F', # Note the switchup here:\n Complex64 : 'D' # numarray.Complex32 == scipy.complex64, etc.\n }\n\n_scipy_dtypechar_inverse = {}\nfor key,value in _scipy_dtypechar.items():\n _scipy_dtypechar_inverse[value] = key\n\n_val = numpy.int_(0).itemsize\nif _val == 8:\n _scipy_dtypechar_inverse['l'] = Int64\n _scipy_dtypechar_inverse['L'] = UInt64\nelif _val == 4:\n _scipy_dtypechar_inverse['l'] = Int32\n _scipy_dtypechar_inverse['L'] = UInt32\n\ndel _val\n\nif LP64:\n _scipy_dtypechar_inverse['p'] = Int64\n _scipy_dtypechar_inverse['P'] = UInt64\nelse:\n _scipy_dtypechar_inverse['p'] = Int32\n _scipy_dtypechar_inverse['P'] = UInt32\n\ndef typefrom(obj):\n return _scipy_dtypechar_inverse[obj.dtype.char]\n", "\"\"\"Isomap for manifold learning\"\"\"\n\n# Author: Jake Vanderplas -- <[email protected]>\n# License: BSD 3 clause (C) 2011\n\nimport numpy as np\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..neighbors import NearestNeighbors, kneighbors_graph\nfrom ..utils import check_arrays\nfrom ..utils.graph import graph_shortest_path\nfrom ..decomposition import KernelPCA\nfrom ..preprocessing import KernelCenterer\n\n\nclass Isomap(BaseEstimator, TransformerMixin):\n \"\"\"Isomap Embedding\n\n Non-linear dimensionality reduction through Isometric Mapping\n\n Parameters\n ----------\n n_neighbors : integer\n number of neighbors to consider for each point.\n\n n_components : integer\n number of coordinates for the manifold\n\n eigen_solver : ['auto'|'arpack'|'dense']\n 'auto' : Attempt to choose the most efficient solver\n for the given problem.\n 'arpack' : Use Arnoldi decomposition to find the eigenvalues\n and eigenvectors.\n 'dense' : Use a direct solver (i.e. 
LAPACK)\n for the eigenvalue decomposition.\n\n tol : float\n Convergence tolerance passed to arpack or lobpcg.\n not used if eigen_solver == 'dense'.\n\n max_iter : integer\n Maximum number of iterations for the arpack solver.\n not used if eigen_solver == 'dense'.\n\n path_method : string ['auto'|'FW'|'D']\n Method to use in finding shortest path.\n 'auto' : attempt to choose the best algorithm automatically\n 'FW' : Floyd-Warshall algorithm\n 'D' : Dijkstra algorithm with Fibonacci Heaps\n\n neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']\n Algorithm to use for nearest neighbors search,\n passed to neighbors.NearestNeighbors instance.\n\n Attributes\n ----------\n `embedding_` : array-like, shape (n_samples, n_components)\n Stores the embedding vectors.\n\n `kernel_pca_` : object\n `KernelPCA` object used to implement the embedding.\n\n `training_data_` : array-like, shape (n_samples, n_features)\n Stores the training data.\n\n `nbrs_` : sklearn.neighbors.NearestNeighbors instance\n Stores nearest neighbors instance, including BallTree or KDtree\n if applicable.\n\n `dist_matrix_` : array-like, shape (n_samples, n_samples)\n Stores the geodesic distance matrix of training data.\n\n References\n ----------\n\n [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric\n framework for nonlinear dimensionality reduction. Science 290 (5500)\n \"\"\"\n\n def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',\n tol=0, max_iter=None, path_method='auto',\n neighbors_algorithm='auto'):\n\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.eigen_solver = eigen_solver\n self.tol = tol\n self.max_iter = max_iter\n self.path_method = path_method\n self.neighbors_algorithm = neighbors_algorithm\n self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,\n algorithm=neighbors_algorithm)\n\n def _fit_transform(self, X):\n X, = check_arrays(X, sparse_format='dense')\n self.nbrs_.fit(X)\n self.training_data_ = self.nbrs_._fit_X\n self.kernel_pca_ = KernelPCA(n_components=self.n_components,\n kernel=\"precomputed\",\n eigen_solver=self.eigen_solver,\n tol=self.tol, max_iter=self.max_iter)\n\n kng = kneighbors_graph(self.nbrs_, self.n_neighbors,\n mode='distance')\n\n self.dist_matrix_ = graph_shortest_path(kng,\n method=self.path_method,\n directed=False)\n G = self.dist_matrix_ ** 2\n G *= -0.5\n\n self.embedding_ = self.kernel_pca_.fit_transform(G)\n\n def reconstruction_error(self):\n \"\"\"Compute the reconstruction error for the embedding.\n\n Returns\n -------\n reconstruction_error : float\n\n Notes\n -------\n The cost function of an isomap embedding is\n\n ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``\n\n Where D is the matrix of distances for the input data X,\n D_fit is the matrix of distances for the output embedding X_fit,\n and K is the isomap kernel:\n\n ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``\n \"\"\"\n G = -0.5 * self.dist_matrix_ ** 2\n G_center = KernelCenterer().fit_transform(G)\n evals = self.kernel_pca_.lambdas_\n return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]\n\n def fit(self, X, y=None):\n \"\"\"Compute the embedding vectors for data X\n\n Parameters\n ----------\n X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}\n Sample data, shape = (n_samples, n_features), in the form of a\n numpy array, precomputed tree, or NearestNeighbors\n object.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._fit_transform(X)\n return 
self\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the model from data in X and transform X.\n\n Parameters\n ----------\n X: {array-like, sparse matrix, BallTree, KDTree}\n Training vector, where n_samples in the number of samples\n and n_features is the number of features.\n\n Returns\n -------\n X_new: array-like, shape (n_samples, n_components)\n \"\"\"\n self._fit_transform(X)\n return self.embedding_\n\n def transform(self, X):\n \"\"\"Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n\n Returns\n -------\n X_new: array-like, shape (n_samples, n_components)\n \"\"\"\n distances, indices = self.nbrs_.kneighbors(X, return_distance=True)\n\n #Create the graph of shortest distances from X to self.training_data_\n # via the nearest neighbors of X.\n #This can be done as a single array operation, but it potentially\n # takes a lot of memory. To avoid that, use a loop:\n G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))\n for i in range(X.shape[0]):\n G_X[i] = np.min((self.dist_matrix_[indices[i]]\n + distances[i][:, None]), 0)\n\n G_X **= 2\n G_X *= -0.5\n\n return self.kernel_pca_.transform(G_X)\n", "\"\"\"\nGeneral tests for all estimators in sklearn.\n\"\"\"\n\n# Authors: Andreas Mueller <[email protected]>\n# Gael Varoquaux [email protected]\n# License: BSD 3 clause\nfrom __future__ import print_function\n\nimport os\nimport warnings\nimport sys\nimport traceback\nimport inspect\nimport pickle\nimport pkgutil\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sklearn.externals.six import PY3\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_true\nfrom sklearn.utils.testing import assert_false\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import all_estimators\nfrom sklearn.utils.testing import meta_estimators\nfrom sklearn.utils.testing import set_random_state\nfrom sklearn.utils.testing import assert_greater\n\nimport sklearn\nfrom sklearn.base import (clone, ClassifierMixin, RegressorMixin,\n TransformerMixin, ClusterMixin)\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import (load_iris, load_boston, make_blobs,\n make_classification)\nfrom sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score\n\nfrom sklearn.lda import LDA\nfrom sklearn.svm.base import BaseLibSVM\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.utils.validation import DataConversionWarning\n\ndont_test = ['SparseCoder', 'EllipticEnvelope', 'EllipticEnvelop',\n 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder',\n 'TfidfTransformer', 'IsotonicRegression', 'OneHotEncoder',\n 'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',\n 'DummyRegressor', 'TruncatedSVD']\n\n\ndef test_all_estimators():\n # Test that estimators are default-constructible, clonable\n # and have working repr.\n estimators = 
all_estimators(include_meta_estimators=True)\n classifier = LDA()\n\n for name, Estimator in estimators:\n # some can just not be sensibly default constructed\n if name in dont_test:\n continue\n # test default-constructibility\n # get rid of deprecation warnings\n with warnings.catch_warnings(record=True):\n if name in meta_estimators:\n estimator = Estimator(classifier)\n else:\n estimator = Estimator()\n # test cloning\n clone(estimator)\n # test __repr__\n repr(estimator)\n # test that set_params returns self\n assert_true(isinstance(estimator.set_params(), Estimator))\n\n # test if init does nothing but set parameters\n # this is important for grid_search etc.\n # We get the default parameters from init and then\n # compare these against the actual values of the attributes.\n\n # this comes from getattr. Gets rid of deprecation decorator.\n init = getattr(estimator.__init__, 'deprecated_original',\n estimator.__init__)\n try:\n args, varargs, kws, defaults = inspect.getargspec(init)\n except TypeError:\n # init is not a python function.\n # true for mixins\n continue\n params = estimator.get_params()\n if name in meta_estimators:\n # they need a non-default argument\n args = args[2:]\n else:\n args = args[1:]\n if args:\n # non-empty list\n assert_equal(len(args), len(defaults))\n else:\n continue\n for arg, default in zip(args, defaults):\n if arg not in params.keys():\n # deprecated parameter, not in get_params\n assert_true(default is None)\n continue\n\n if isinstance(params[arg], np.ndarray):\n assert_array_equal(params[arg], default)\n else:\n assert_equal(params[arg], default)\n\n\ndef test_all_estimator_no_base_class():\n for name, Estimator in all_estimators():\n msg = (\"Base estimators such as {0} should not be included\"\n \" in all_estimators\").format(name)\n assert_false(name.lower().startswith('base'), msg=msg)\n\n\ndef test_estimators_sparse_data():\n # All estimators should either deal with sparse data, or raise an\n # intelligible error message\n rng = np.random.RandomState(0)\n X = rng.rand(40, 10)\n X[X < .8] = 0\n X = sparse.csr_matrix(X)\n y = (4 * rng.rand(40)).astype(np.int)\n estimators = all_estimators()\n estimators = [(name, Estimator) for name, Estimator in estimators\n if issubclass(Estimator, (ClassifierMixin, RegressorMixin))]\n for name, Classifier in estimators:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n # fit\n try:\n classifier.fit(X, y)\n except TypeError as e:\n if not 'sparse' in repr(e):\n print(\"Estimator %s doesn't seem to fail gracefully on \"\n \"sparse data\" % name)\n traceback.print_exc(file=sys.stdout)\n raise e\n except Exception as exc:\n print(\"Estimator %s doesn't seem to fail gracefully on \"\n \"sparse data\" % name)\n traceback.print_exc(file=sys.stdout)\n raise exc\n\n\ndef test_transformers():\n # test if transformers do something sensible on training set\n # also test all shapes / shape errors\n transformers = all_estimators(type_filter='transformer')\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n n_samples, n_features = X.shape\n X = StandardScaler().fit_transform(X)\n X -= X.min()\n\n succeeded = True\n\n for name, Transformer in transformers:\n if name in dont_test:\n continue\n # these don't actually fit the data:\n if name in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:\n continue\n # catch deprecation warnings\n with 
warnings.catch_warnings(record=True):\n transformer = Transformer()\n set_random_state(transformer)\n if hasattr(transformer, 'compute_importances'):\n transformer.compute_importances = True\n\n if name == 'SelectKBest':\n # SelectKBest has a default of k=10\n # which is more feature than we have.\n transformer.k = 1\n elif name in ['GaussianRandomProjection',\n 'SparseRandomProjection']:\n # Due to the jl lemma and very few samples, the number\n # of components of the random matrix projection will be greater\n # than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n transformer.n_components = 1\n elif name == \"MiniBatchDictionaryLearning\":\n transformer.set_params(n_iter=5) # default = 1000\n\n elif name == \"KernelPCA\":\n transformer.remove_zero_eig = False\n\n # fit\n\n if name in ('PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'):\n y_ = np.c_[y, y]\n y_[::2, 1] *= 2\n else:\n y_ = y\n\n try:\n transformer.fit(X, y_)\n X_pred = transformer.fit_transform(X, y=y_)\n if isinstance(X_pred, tuple):\n for x_pred in X_pred:\n assert_equal(x_pred.shape[0], n_samples)\n else:\n assert_equal(X_pred.shape[0], n_samples)\n except Exception as e:\n print(transformer)\n print(e)\n print()\n succeeded = False\n continue\n\n if hasattr(transformer, 'transform'):\n if name in ('PLSCanonical', 'PLSRegression', 'CCA',\n 'PLSSVD'):\n X_pred2 = transformer.transform(X, y_)\n X_pred3 = transformer.fit_transform(X, y=y_)\n else:\n X_pred2 = transformer.transform(X)\n X_pred3 = transformer.fit_transform(X, y=y_)\n if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):\n for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):\n assert_array_almost_equal(\n x_pred, x_pred2, 2,\n \"fit_transform not correct in %s\" % Transformer)\n assert_array_almost_equal(\n x_pred3, x_pred2, 2,\n \"fit_transform not correct in %s\" % Transformer)\n else:\n assert_array_almost_equal(\n X_pred, X_pred2, 2,\n \"fit_transform not correct in %s\" % Transformer)\n assert_array_almost_equal(\n X_pred3, X_pred2, 2,\n \"fit_transform not correct in %s\" % Transformer)\n\n # raises error on malformed input for transform\n assert_raises(ValueError, transformer.transform, X.T)\n assert_true(succeeded)\n\n\ndef test_transformers_sparse_data():\n # All estimators should either deal with sparse data, or raise an\n # intelligible error message\n rng = np.random.RandomState(0)\n X = rng.rand(40, 10)\n X[X < .8] = 0\n X = sparse.csr_matrix(X)\n y = (4 * rng.rand(40)).astype(np.int)\n estimators = all_estimators(type_filter='transformer')\n for name, Transformer in estimators:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n if name in ['Scaler', 'StandardScaler']:\n transformer = Transformer(with_mean=False)\n elif name in ['GaussianRandomProjection',\n 'SparseRandomProjection']:\n # Due to the jl lemma and very few samples, the number\n # of components of the random matrix projection will be greater\n # than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n transformer = Transformer(n_components=np.int(X.shape[1] / 4))\n else:\n transformer = Transformer()\n # fit\n try:\n transformer.fit(X, y)\n except TypeError as e:\n if not 'sparse' in repr(e):\n print(\"Estimator %s doesn't seem to fail gracefully on \"\n \"sparse data\" % name)\n traceback.print_exc(file=sys.stdout)\n raise e\n except Exception as exc:\n print(\"Estimator %s doesn't seem to fail gracefully on \"\n \"sparse data\" % name)\n 
traceback.print_exc(file=sys.stdout)\n raise exc\n\n\ndef test_estimators_nan_inf():\n # Test that all estimators check their input for NaN's and infs\n rnd = np.random.RandomState(0)\n X_train_finite = rnd.uniform(size=(10, 3))\n X_train_nan = rnd.uniform(size=(10, 3))\n X_train_nan[0, 0] = np.nan\n X_train_inf = rnd.uniform(size=(10, 3))\n X_train_inf[0, 0] = np.inf\n y = np.ones(10)\n y[:5] = 0\n estimators = all_estimators()\n estimators = [(name, E) for name, E in estimators\n if (issubclass(E, ClassifierMixin) or\n issubclass(E, RegressorMixin) or\n issubclass(E, TransformerMixin) or\n issubclass(E, ClusterMixin))]\n error_string_fit = \"Estimator doesn't check for NaN and inf in fit.\"\n error_string_predict = (\"Estimator doesn't check for NaN and inf in\"\n \" predict.\")\n error_string_transform = (\"Estimator doesn't check for NaN and inf in\"\n \" transform.\")\n for X_train in [X_train_nan, X_train_inf]:\n for name, Estimator in estimators:\n if name in dont_test:\n continue\n if name in ('PLSCanonical', 'PLSRegression', 'CCA',\n 'PLSSVD', 'Imputer'): # Imputer accepts nan\n continue\n\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n estimator = Estimator()\n if name in ['GaussianRandomProjection',\n 'SparseRandomProjection']:\n # Due to the jl lemma and very few samples, the number\n # of components of the random matrix projection will be\n # greater\n # than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n estimator = Estimator(n_components=1)\n\n set_random_state(estimator, 1)\n # try to fit\n try:\n if issubclass(Estimator, ClusterMixin):\n estimator.fit(X_train)\n else:\n estimator.fit(X_train, y)\n except ValueError as e:\n if not 'inf' in repr(e) and not 'NaN' in repr(e):\n print(error_string_fit, Estimator, e)\n traceback.print_exc(file=sys.stdout)\n raise e\n except Exception as exc:\n print(error_string_fit, Estimator, exc)\n traceback.print_exc(file=sys.stdout)\n raise exc\n else:\n raise AssertionError(error_string_fit, Estimator)\n # actually fit\n if issubclass(Estimator, ClusterMixin):\n # All estimators except clustering algorithm\n # support fitting with (optional) y\n estimator.fit(X_train_finite)\n else:\n estimator.fit(X_train_finite, y)\n\n # predict\n if hasattr(estimator, \"predict\"):\n try:\n estimator.predict(X_train)\n except ValueError as e:\n if not 'inf' in repr(e) and not 'NaN' in repr(e):\n print(error_string_predict, Estimator, e)\n traceback.print_exc(file=sys.stdout)\n raise e\n except Exception as exc:\n print(error_string_predict, Estimator, exc)\n traceback.print_exc(file=sys.stdout)\n else:\n raise AssertionError(error_string_predict, Estimator)\n\n # transform\n if hasattr(estimator, \"transform\"):\n try:\n estimator.transform(X_train)\n except ValueError as e:\n if not 'inf' in repr(e) and not 'NaN' in repr(e):\n print(error_string_transform, Estimator, e)\n traceback.print_exc(file=sys.stdout)\n raise e\n except Exception as exc:\n print(error_string_transform, Estimator, exc)\n traceback.print_exc(file=sys.stdout)\n else:\n raise AssertionError(error_string_transform, Estimator)\n\n\ndef test_transformers_pickle():\n # test if transformers do something sensible on training set\n # also test all shapes / shape errors\n transformers = all_estimators(type_filter='transformer')\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n n_samples, n_features = X.shape\n X = StandardScaler().fit_transform(X)\n X -= 
X.min()\n\n succeeded = True\n\n for name, Transformer in transformers:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n transformer = Transformer()\n if not hasattr(transformer, 'transform'):\n continue\n set_random_state(transformer)\n if hasattr(transformer, 'compute_importances'):\n transformer.compute_importances = True\n\n if name == \"SelectKBest\":\n # SelectKBest has a default of k=10\n # which is more feature than we have.\n transformer.k = 1\n elif name in ['GaussianRandomProjection', 'SparseRandomProjection']:\n # Due to the jl lemma and very few samples, the number\n # of components of the random matrix projection will be greater\n # than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n transformer.n_components = 1\n\n # fit\n if name in ('PLSCanonical', 'PLSRegression', 'CCA',\n 'PLSSVD'):\n random_state = np.random.RandomState(seed=12345)\n y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n transformer.fit(X, y_)\n X_pred = transformer.fit(X, y_).transform(X)\n pickled_transformer = pickle.dumps(transformer)\n unpickled_transformer = pickle.loads(pickled_transformer)\n pickled_X_pred = unpickled_transformer.transform(X)\n\n try:\n assert_array_almost_equal(pickled_X_pred, X_pred)\n except Exception as exc:\n succeeded = False\n print (\"Transformer %s doesn't predict the same value \"\n \"after pickling\" % name)\n raise exc\n\n assert_true(succeeded)\n\n\ndef test_classifiers_one_label():\n # test classifiers trained on a single label always return this label\n # or raise an sensible error message\n rnd = np.random.RandomState(0)\n X_train = rnd.uniform(size=(10, 3))\n X_test = rnd.uniform(size=(10, 3))\n y = np.ones(10)\n classifiers = all_estimators(type_filter='classifier')\n error_string_fit = \"Classifier can't train when only one class is present.\"\n error_string_predict = (\"Classifier can't predict when only one class is \"\n \"present.\")\n for name, Classifier in classifiers:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n # try to fit\n try:\n classifier.fit(X_train, y)\n except ValueError as e:\n if not 'class' in repr(e):\n print(error_string_fit, Classifier, e)\n traceback.print_exc(file=sys.stdout)\n raise e\n else:\n continue\n except Exception as exc:\n print(error_string_fit, Classifier, exc)\n traceback.print_exc(file=sys.stdout)\n raise exc\n # predict\n try:\n assert_array_equal(classifier.predict(X_test), y)\n except Exception as exc:\n print(error_string_predict, Classifier, exc)\n traceback.print_exc(file=sys.stdout)\n\n\ndef test_clustering():\n # test if clustering algorithms do something sensible\n # also test all shapes / shape errors\n clustering = all_estimators(type_filter='cluster')\n iris = load_iris()\n X, y = iris.data, iris.target\n X, y = shuffle(X, y, random_state=7)\n n_samples, n_features = X.shape\n X = StandardScaler().fit_transform(X)\n for name, Alg in clustering:\n if name == 'WardAgglomeration':\n # this is clustering on the features\n # let's not test that here.\n continue\n # catch deprecation and neighbors warnings\n with warnings.catch_warnings(record=True):\n alg = Alg()\n if hasattr(alg, \"n_clusters\"):\n alg.set_params(n_clusters=3)\n set_random_state(alg)\n if name == 'AffinityPropagation':\n alg.set_params(preference=-100)\n # fit\n alg.fit(X)\n\n assert_equal(alg.labels_.shape, 
(n_samples,))\n pred = alg.labels_\n assert_greater(adjusted_rand_score(pred, y), 0.4)\n # fit another time with ``fit_predict`` and compare results\n if name is 'SpectralClustering':\n # there is no way to make Spectral clustering deterministic :(\n continue\n set_random_state(alg)\n with warnings.catch_warnings(record=True):\n pred2 = alg.fit_predict(X)\n assert_array_equal(pred, pred2)\n\n\ndef test_classifiers_train():\n # test if classifiers do something sensible on training set\n # also test all shapes / shape errors\n classifiers = all_estimators(type_filter='classifier')\n X_m, y_m = make_blobs(random_state=0)\n X_m, y_m = shuffle(X_m, y_m, random_state=7)\n X_m = StandardScaler().fit_transform(X_m)\n # generate binary problem from multi-class one\n y_b = y_m[y_m != 2]\n X_b = X_m[y_m != 2]\n for (X, y) in [(X_m, y_m), (X_b, y_b)]:\n # do it once with binary, once with multiclass\n classes = np.unique(y)\n n_classes = len(classes)\n n_samples, n_features = X.shape\n for name, Classifier in classifiers:\n if name in dont_test:\n continue\n if name in ['MultinomialNB', 'BernoulliNB']:\n # TODO also test these!\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n # raises error on malformed input for fit\n assert_raises(ValueError, classifier.fit, X, y[:-1])\n\n # fit\n classifier.fit(X, y)\n assert_true(hasattr(classifier, \"classes_\"))\n y_pred = classifier.predict(X)\n assert_equal(y_pred.shape, (n_samples,))\n # training set performance\n assert_greater(accuracy_score(y, y_pred), 0.85)\n\n # raises error on malformed input for predict\n assert_raises(ValueError, classifier.predict, X.T)\n if hasattr(classifier, \"decision_function\"):\n try:\n # decision_function agrees with predict:\n decision = classifier.decision_function(X)\n if n_classes is 2:\n assert_equal(decision.ravel().shape, (n_samples,))\n dec_pred = (decision.ravel() > 0).astype(np.int)\n assert_array_equal(dec_pred, y_pred)\n if (n_classes is 3\n and not isinstance(classifier, BaseLibSVM)):\n # 1on1 of LibSVM works differently\n assert_equal(decision.shape, (n_samples, n_classes))\n assert_array_equal(np.argmax(decision, axis=1), y_pred)\n\n # raises error on malformed input\n assert_raises(ValueError,\n classifier.decision_function, X.T)\n # raises error on malformed input for decision_function\n assert_raises(ValueError,\n classifier.decision_function, X.T)\n except NotImplementedError:\n pass\n if hasattr(classifier, \"predict_proba\"):\n try:\n # predict_proba agrees with predict:\n y_prob = classifier.predict_proba(X)\n assert_equal(y_prob.shape, (n_samples, n_classes))\n assert_array_equal(np.argmax(y_prob, axis=1), y_pred)\n # check that probas for all classes sum to one\n assert_array_almost_equal(\n np.sum(y_prob, axis=1), np.ones(n_samples))\n # raises error on malformed input\n assert_raises(ValueError, classifier.predict_proba, X.T)\n # raises error on malformed input for predict_proba\n assert_raises(ValueError, classifier.predict_proba, X.T)\n except NotImplementedError:\n pass\n\n\ndef test_classifiers_classes():\n # test if classifiers can cope with non-consecutive classes\n classifiers = all_estimators(type_filter='classifier')\n iris = load_iris()\n X, y = iris.data, iris.target\n X, y = shuffle(X, y, random_state=1)\n X = StandardScaler().fit_transform(X)\n y_names = iris.target_names[y]\n for name, Classifier in classifiers:\n if name in dont_test:\n continue\n if name in ['MultinomialNB', 'BernoulliNB']:\n # TODO also test these!\n 
continue\n if name in [\"LabelPropagation\", \"LabelSpreading\"]:\n # TODO some complication with -1 label\n y_ = y\n else:\n y_ = y_names\n\n classes = np.unique(y_)\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n # fit\n try:\n classifier.fit(X, y_)\n except Exception as e:\n print(e)\n\n y_pred = classifier.predict(X)\n # training set performance\n assert_array_equal(np.unique(y_), np.unique(y_pred))\n accuracy = accuracy_score(y_, y_pred)\n assert_greater(accuracy, 0.78,\n \"accuracy %f of %s not greater than 0.78\"\n % (accuracy, name))\n #assert_array_equal(\n #clf.classes_, classes,\n #\"Unexpected classes_ attribute for %r\" % clf)\n if np.any(classifier.classes_ != classes):\n print(\"Unexpected classes_ attribute for %r: expected %s, got %s\" %\n (classifier, classes, classifier.classes_))\n\n\ndef test_classifiers_input_shapes():\n # test if classifiers can cope with y.shape = (n_samples, 1)\n classifiers = all_estimators(type_filter='classifier')\n iris = load_iris()\n X, y = iris.data, iris.target\n X, y = shuffle(X, y, random_state=1)\n X = StandardScaler().fit_transform(X)\n for name, Classifier in classifiers:\n if name in dont_test:\n continue\n if name in [\"MultinomialNB\", \"LabelPropagation\", \"LabelSpreading\"]:\n # TODO some complication with -1 label\n continue\n if name in [\"DecisionTreeClassifier\", \"ExtraTreeClassifier\"]:\n # We don't raise a warning in these classifiers, as\n # the column y interface is used by the forests.\n continue\n\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n set_random_state(classifier)\n # fit\n classifier.fit(X, y)\n y_pred = classifier.predict(X)\n\n set_random_state(classifier)\n # Check that when a 2D y is given, a DataConversionWarning is\n # raised\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\", DataConversionWarning)\n classifier.fit(X, y[:, np.newaxis])\n try:\n assert_equal(len(w), 1)\n assert_array_equal(y_pred, classifier.predict(X))\n except Exception:\n print(classifier)\n raise\n\n\ndef test_classifiers_pickle():\n # test if classifiers do something sensible on training set\n # also test all shapes / shape errors\n classifiers = all_estimators(type_filter='classifier')\n X_m, y_m = make_blobs(random_state=0)\n X_m, y_m = shuffle(X_m, y_m, random_state=7)\n X_m = StandardScaler().fit_transform(X_m)\n # generate binary problem from multi-class one\n y_b = y_m[y_m != 2]\n X_b = X_m[y_m != 2]\n succeeded = True\n for (X, y) in [(X_m, y_m), (X_b, y_b)]:\n # do it once with binary, once with multiclass\n n_samples, n_features = X.shape\n for name, Classifier in classifiers:\n if name in dont_test:\n continue\n if name in ['MultinomialNB', 'BernoulliNB']:\n # TODO also test these!\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n # raises error on malformed input for fit\n assert_raises(ValueError, classifier.fit, X, y[:-1])\n\n # fit\n classifier.fit(X, y)\n y_pred = classifier.predict(X)\n pickled_classifier = pickle.dumps(classifier)\n unpickled_classifier = pickle.loads(pickled_classifier)\n pickled_y_pred = unpickled_classifier.predict(X)\n\n try:\n assert_array_almost_equal(pickled_y_pred, y_pred)\n except Exception as exc:\n succeeded = False\n print (\"Estimator %s doesn't predict the same value \"\n \"after pickling\" % name)\n raise exc\n assert_true(succeeded)\n\n\nBOSTON = None\n\n\ndef 
_boston_subset():\n global BOSTON\n if BOSTON is None:\n boston = load_boston()\n X, y = boston.data, boston.target\n X, y = shuffle(X, y, random_state=0)\n X, y = X[:200], y[:200]\n X = StandardScaler().fit_transform(X)\n BOSTON = X, y\n return BOSTON\n\n\ndef test_regressors_int():\n # test if regressors can cope with integer labels (by converting them to\n # float)\n regressors = all_estimators(type_filter='regressor')\n X, _ = _boston_subset()\n X = X[:50]\n rnd = np.random.RandomState(0)\n y = rnd.randint(2, size=X.shape[0])\n for name, Regressor in regressors:\n if name in dont_test or name in ('CCA'):\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n # separate estimators to control random seeds\n regressor_1 = Regressor()\n regressor_2 = Regressor()\n set_random_state(regressor_1)\n set_random_state(regressor_2)\n\n if name in ('_PLS', 'PLSCanonical', 'PLSRegression'):\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n # fit\n regressor_1.fit(X, y_)\n pred1 = regressor_1.predict(X)\n regressor_2.fit(X, y_.astype(np.float))\n pred2 = regressor_2.predict(X)\n assert_array_almost_equal(pred1, pred2, 2, name)\n\n\ndef test_regressors_train():\n regressors = all_estimators(type_filter='regressor')\n # TODO: test with intercept\n # TODO: test with multiple responses\n X, y = _boston_subset()\n y = StandardScaler().fit_transform(y) # X is already scaled\n rnd = np.random.RandomState(0)\n succeeded = True\n for name, Regressor in regressors:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n regressor = Regressor()\n if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):\n # linear regressors need to set alpha, but not generalized CV ones\n regressor.alpha = 0.01\n\n # raises error on malformed input for fit\n assert_raises(ValueError, regressor.fit, X, y[:-1])\n # fit\n try:\n if name in ('PLSCanonical', 'PLSRegression', 'CCA'):\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n regressor.fit(X, y_)\n regressor.predict(X)\n\n if name not in ('PLSCanonical', 'CCA'): # TODO: find out why\n assert_greater(regressor.score(X, y_), 0.5)\n except Exception as e:\n print(regressor)\n print(e)\n print()\n succeeded = False\n\n assert_true(succeeded)\n\n\ndef test_regressor_pickle():\n # Test that estimators can be pickled, and once pickled\n # give the same answer as before.\n regressors = all_estimators(type_filter='regressor')\n X, y = _boston_subset()\n # TODO: test with intercept\n # TODO: test with multiple responses\n y = StandardScaler().fit_transform(y) # X is already scaled\n rnd = np.random.RandomState(0)\n succeeded = True\n for name, Regressor in regressors:\n if name in dont_test:\n continue\n # catch deprecation warnings\n with warnings.catch_warnings(record=True):\n regressor = Regressor()\n if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):\n # linear regressors need to set alpha, but not generalized CV ones\n regressor.alpha = 0.01\n\n if name in ('PLSCanonical', 'PLSRegression', 'CCA'):\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n regressor.fit(X, y_)\n y_pred = regressor.predict(X)\n # store old predictions\n pickled_regressor = pickle.dumps(regressor)\n unpickled_regressor = pickle.loads(pickled_regressor)\n pickled_y_pred = unpickled_regressor.predict(X)\n\n try:\n assert_array_almost_equal(pickled_y_pred, y_pred)\n except 
Exception as exc:\n succeeded = False\n print (\"Estimator %s doesn't predict the same value \"\n \"after pickling\" % name)\n raise exc\n assert_true(succeeded)\n\n\ndef test_configure():\n # Smoke test the 'configure' step of setup, this tests all the\n # 'configure' functions in the setup.pys in the scikit\n cwd = os.getcwd()\n setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))\n setup_filename = os.path.join(setup_path, 'setup.py')\n if not os.path.exists(setup_filename):\n return\n try:\n os.chdir(setup_path)\n old_argv = sys.argv\n sys.argv = ['setup.py', 'config']\n with warnings.catch_warnings():\n # The configuration spits out warnings when not finding\n # Blas/Atlas development headers\n warnings.simplefilter('ignore', UserWarning)\n if PY3:\n exec(open('setup.py').read(), dict(__name__='__main__'))\n else:\n execfile('setup.py', dict(__name__='__main__'))\n finally:\n sys.argv = old_argv\n os.chdir(cwd)\n\n\ndef test_class_weight_classifiers():\n # test that class_weight works and that the semantics are consistent\n classifiers = all_estimators(type_filter='classifier')\n\n with warnings.catch_warnings(record=True):\n classifiers = [c for c in classifiers\n if 'class_weight' in c[1]().get_params().keys()]\n\n for n_centers in [2, 3]:\n # create a very noisy dataset\n X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n for name, Classifier in classifiers:\n if name == \"NuSVC\":\n # the sparse version has a parameter that doesn't do anything\n continue\n if name.endswith(\"NB\"):\n # NaiveBayes classifiers have a somewhat different interface.\n # FIXME SOON!\n continue\n if n_centers == 2:\n class_weight = {0: 1000, 1: 0.0001}\n else:\n class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}\n\n with warnings.catch_warnings(record=True):\n classifier = Classifier(class_weight=class_weight)\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n assert_greater(np.mean(y_pred == 0), 0.9)\n\n\ndef test_class_weight_auto_classifies():\n # test that class_weight=\"auto\" improves f1-score\n classifiers = all_estimators(type_filter='classifier')\n\n with warnings.catch_warnings(record=True):\n classifiers = [c for c in classifiers\n if 'class_weight' in c[1]().get_params().keys()]\n\n for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):\n # create unbalanced dataset\n X, y = make_classification(n_classes=n_classes, n_samples=200,\n n_features=10, weights=weights,\n random_state=0, n_informative=n_classes)\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n for name, Classifier in classifiers:\n if name == \"NuSVC\":\n # the sparse version has a parameter that doesn't do anything\n continue\n\n if name.startswith(\"RidgeClassifier\"):\n # RidgeClassifier behaves unexpected\n # FIXME!\n continue\n\n if name.endswith(\"NB\"):\n # NaiveBayes classifiers have a somewhat different interface.\n # FIXME SOON!\n continue\n\n with warnings.catch_warnings(record=True):\n classifier = Classifier()\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n\n classifier.set_params(class_weight='auto')\n classifier.fit(X_train, y_train)\n 
y_pred_auto = classifier.predict(X_test)\n assert_greater(f1_score(y_test, y_pred_auto),\n f1_score(y_test, y_pred))\n\n\ndef test_estimators_overwrite_params():\n # test whether any classifier overwrites his init parameters during fit\n for est_type in [\"classifier\", \"regressor\", \"transformer\"]:\n estimators = all_estimators(type_filter=est_type)\n X, y = make_blobs(random_state=0, n_samples=9)\n # some want non-negative input\n X -= X.min()\n for name, Estimator in estimators:\n if (name in dont_test\n or name in ['CCA', '_CCA', 'PLSCanonical',\n 'PLSRegression',\n 'PLSSVD', 'GaussianProcess']):\n # FIXME!\n # in particular GaussianProcess!\n continue\n with warnings.catch_warnings(record=True):\n # catch deprecation warnings\n estimator = Estimator()\n\n if hasattr(estimator, 'batch_size'):\n # FIXME\n # for MiniBatchDictLearning\n estimator.batch_size = 1\n\n if name in ['GaussianRandomProjection',\n 'SparseRandomProjection']:\n # Due to the jl lemma and very few samples, the number\n # of components of the random matrix projection will be\n # greater\n # than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n estimator = Estimator(n_components=1)\n\n set_random_state(estimator)\n\n params = estimator.get_params()\n estimator.fit(X, y)\n new_params = estimator.get_params()\n for k, v in params.items():\n assert_false(np.any(new_params[k] != v),\n \"Estimator %s changes its parameter %s\"\n \" from %s to %s during fit.\"\n % (name, k, v, new_params[k]))\n\n\ndef test_cluster_overwrite_params():\n # test whether any classifier overwrites his init parameters during fit\n clusterers = all_estimators(type_filter=\"cluster\")\n X, y = make_blobs(random_state=0, n_samples=9)\n # some want non-negative input\n X\n for name, Clustering in clusterers:\n with warnings.catch_warnings(record=True):\n # catch deprecation warnings\n clustering = Clustering()\n params = clustering.get_params()\n clustering.fit(X)\n new_params = clustering.get_params()\n for k, v in params.items():\n assert_false(np.any(new_params[k] != v),\n \"Estimator %s changes its parameter %s\"\n \" from %s to %s during fit.\"\n % (name, k, v, new_params[k]))\n\n\ndef test_import_all_consistency():\n # Smoke test to check that any name in a __all__ list is actually defined\n # in the namespace of the module or package.\n pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',\n onerror=lambda _: None)\n for importer, modname, ispkg in pkgs:\n if \".tests.\" in modname:\n continue\n package = __import__(modname, fromlist=\"dummy\")\n for name in getattr(package, '__all__', ()):\n if getattr(package, name, None) is None:\n raise AttributeError(\n \"Module '{}' has no attribute '{}'\".format(\n modname, name))\n", "\"\"\"Utilities for input validation\"\"\"\n# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)\n# License: BSD 3 clause\n\nimport warnings\nimport numbers\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..externals import six\nfrom .fixes import safe_copy\n\n\nclass DataConversionWarning(UserWarning):\n \"A warning on implicit data conversions happening in the code\"\n pass\n\n\nwarnings.simplefilter(\"always\", DataConversionWarning)\n\n\ndef _assert_all_finite(X):\n \"\"\"Like assert_all_finite, but only for ndarray.\"\"\"\n if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())\n and not np.isfinite(X).all()):\n raise ValueError(\"Array contains NaN or infinity.\")\n\n\ndef assert_all_finite(X):\n \"\"\"Throw a 
ValueError if X contains NaN or infinity.\n\n Input MUST be an np.ndarray instance or a scipy.sparse matrix.\"\"\"\n\n # First try an O(n) time, O(1) space solution for the common case that\n # there everything is finite; fall back to O(n) space np.isfinite to\n # prevent false positives from overflow in sum method.\n _assert_all_finite(X.data if sparse.issparse(X) else X)\n\n\ndef safe_asarray(X, dtype=None, order=None, copy=False):\n \"\"\"Convert X to an array or sparse matrix.\n\n Prevents copying X when possible; sparse matrices are passed through.\"\"\"\n if sparse.issparse(X):\n if copy:\n X = X.copy()\n assert_all_finite(X.data)\n else:\n X = np.array(X, dtype=dtype, order=order, copy=copy)\n assert_all_finite(X)\n return X\n\n\ndef as_float_array(X, copy=True):\n \"\"\"Converts an array-like to an array of floats\n\n The new dtype will be np.float32 or np.float64, depending on the original\n type. The function can create a copy or modify the argument depending\n on the argument copy.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n\n copy : bool, optional\n If True, a copy of X will be created. If False, a copy may still be\n returned if X's dtype is not a floating point type.\n\n Returns\n -------\n XT : {array, sparse matrix}\n An array of type np.float\n \"\"\"\n if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)\n and not sparse.issparse(X)):\n return safe_asarray(X, dtype=np.float64, copy=copy)\n elif sparse.issparse(X) and X.dtype in [np.float32, np.float64]:\n return X.copy() if copy else X\n elif X.dtype in [np.float32, np.float64]: # is numpy array\n return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X\n else:\n return X.astype(np.float32 if X.dtype == np.int32 else np.float64)\n\n\ndef array2d(X, dtype=None, order=None, copy=False, force_all_finite=True):\n \"\"\"Returns at least 2-d array with data from X\"\"\"\n if sparse.issparse(X):\n raise TypeError('A sparse matrix was passed, but dense data '\n 'is required. 
Use X.toarray() to convert to dense.')\n X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)\n if force_all_finite:\n _assert_all_finite(X_2d)\n if X is X_2d and copy:\n X_2d = safe_copy(X_2d)\n return X_2d\n\n\ndef _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod,\n force_all_finite):\n if sparse.issparse(X):\n if dtype is None or X.dtype == dtype:\n X = getattr(X, convmethod)()\n else:\n X = sparse_class(X, dtype=dtype)\n if force_all_finite:\n _assert_all_finite(X.data)\n X.data = np.array(X.data, copy=False, order=order)\n else:\n X = array2d(X, dtype=dtype, order=order, copy=copy,\n force_all_finite=force_all_finite)\n if force_all_finite:\n _assert_all_finite(X)\n return X\n\n\ndef atleast2d_or_csc(X, dtype=None, order=None, copy=False,\n force_all_finite=True):\n \"\"\"Like numpy.atleast_2d, but converts sparse matrices to CSC format.\n\n Also, converts np.matrix to np.ndarray.\n \"\"\"\n return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,\n \"tocsc\", force_all_finite)\n\n\ndef atleast2d_or_csr(X, dtype=None, order=None, copy=False,\n force_all_finite=True):\n \"\"\"Like numpy.atleast_2d, but converts sparse matrices to CSR format\n\n Also, converts np.matrix to np.ndarray.\n \"\"\"\n return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csr_matrix,\n \"tocsr\", force_all_finite)\n\n\ndef _num_samples(x):\n \"\"\"Return number of samples in array-like x.\"\"\"\n if not hasattr(x, '__len__') and not hasattr(x, 'shape'):\n raise TypeError(\"Expected sequence or array-like, got %r\" % x)\n return x.shape[0] if hasattr(x, 'shape') else len(x)\n\n\ndef check_arrays(*arrays, **options):\n \"\"\"Check that all arrays have consistent first dimensions.\n\n Checks whether all objects in arrays have the same shape or length.\n By default lists and tuples are converted to numpy arrays.\n\n It is possible to enforce certain properties, such as dtype, continguity\n and sparse matrix format (if a sparse matrix is passed).\n\n Converting lists to arrays can be disabled by setting ``allow_lists=True``.\n Lists can then contain arbitrary objects and are not checked for dtype,\n finiteness or anything else but length. 
Arrays are still checked\n and possibly converted.\n\n\n Parameters\n ----------\n *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]\n Python lists or tuples occurring in arrays are converted to 1D numpy\n arrays, unless allow_lists is specified.\n\n sparse_format : 'csr', 'csc' or 'dense', None by default\n If not None, any scipy.sparse matrix is converted to\n Compressed Sparse Rows or Compressed Sparse Columns representations.\n If 'dense', an error is raised when a sparse array is\n passed.\n\n copy : boolean, False by default\n If copy is True, ensure that returned arrays are copies of the original\n (if not already converted to another format earlier in the process).\n\n check_ccontiguous : boolean, False by default\n Check that the arrays are C contiguous\n\n dtype : a numpy dtype instance, None by default\n Enforce a specific dtype.\n\n allow_lists : bool\n Allow lists of arbitrary objects as input, just check their length.\n Disables\n \"\"\"\n sparse_format = options.pop('sparse_format', None)\n if sparse_format not in (None, 'csr', 'csc', 'dense'):\n raise ValueError('Unexpected sparse format: %r' % sparse_format)\n copy = options.pop('copy', False)\n check_ccontiguous = options.pop('check_ccontiguous', False)\n dtype = options.pop('dtype', None)\n allow_lists = options.pop('allow_lists', False)\n if options:\n raise TypeError(\"Unexpected keyword arguments: %r\" % options.keys())\n\n if len(arrays) == 0:\n return None\n\n n_samples = _num_samples(arrays[0])\n\n checked_arrays = []\n for array in arrays:\n array_orig = array\n if array is None:\n # special case: ignore optional y=None kwarg pattern\n checked_arrays.append(array)\n continue\n size = _num_samples(array)\n\n if size != n_samples:\n raise ValueError(\"Found array with dim %d. Expected %d\"\n % (size, n_samples))\n\n if not allow_lists or hasattr(array, \"shape\"):\n if sparse.issparse(array):\n if sparse_format == 'csr':\n array = array.tocsr()\n elif sparse_format == 'csc':\n array = array.tocsc()\n elif sparse_format == 'dense':\n raise TypeError('A sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.')\n if check_ccontiguous:\n array.data = np.ascontiguousarray(array.data, dtype=dtype)\n else:\n array.data = np.asarray(array.data, dtype=dtype)\n _assert_all_finite(array.data)\n else:\n if check_ccontiguous:\n array = np.ascontiguousarray(array, dtype=dtype)\n else:\n array = np.asarray(array, dtype=dtype)\n _assert_all_finite(array)\n\n if copy and array is array_orig:\n array = array.copy()\n checked_arrays.append(array)\n\n return checked_arrays\n\n\ndef column_or_1d(y, warn=False):\n \"\"\" Ravel column or 1d numpy array, else raises an error\n\n Parameters\n ----------\n y : array-like\n\n Returns\n -------\n y : array\n\n \"\"\"\n shape = np.shape(y)\n if len(shape) == 1:\n return np.ravel(y)\n if len(shape) == 2 and shape[1] == 1:\n if warn:\n warnings.warn(\"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\",\n DataConversionWarning, stacklevel=2)\n return np.ravel(y)\n\n raise ValueError(\"bad input shape {0}\".format(shape))\n\n\ndef warn_if_not_float(X, estimator='This algorithm'):\n \"\"\"Warning utility function to check that data type is floating point.\n\n Returns True if a warning was raised (i.e. 
the input is not float) and\n False otherwise, for easier input validation.\n \"\"\"\n if not isinstance(estimator, six.string_types):\n estimator = estimator.__class__.__name__\n if X.dtype.kind != 'f':\n warnings.warn(\"%s assumes floating point values as input, \"\n \"got %s\" % (estimator, X.dtype))\n return True\n return False\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance\n\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n", "from numpy.testing import *\nfrom numpy.core import *\nfrom numpy import matrix, asmatrix, bmat\nfrom numpy.matrixlib.defmatrix import matrix_power\nfrom numpy.matrixlib import mat\nimport numpy as np\n\nclass TestCtor(TestCase):\n def test_basic(self):\n A = array([[1,2],[3,4]])\n mA = matrix(A)\n assert all(mA.A == A)\n\n B = bmat(\"A,A;A,A\")\n C = bmat([[A,A], [A,A]])\n D = array([[1,2,1,2],\n [3,4,3,4],\n [1,2,1,2],\n [3,4,3,4]])\n assert all(B.A == D)\n assert all(C.A == D)\n\n E = array([[5,6],[7,8]])\n AEresult = matrix([[1,2,5,6],[3,4,7,8]])\n assert all(bmat([A,E]) == AEresult)\n\n vec = arange(5)\n mvec = matrix(vec)\n assert mvec.shape == (1,5)\n\n def test_bmat_nondefault_str(self):\n A = array([[1,2],[3,4]])\n B = array([[5,6],[7,8]])\n Aresult = array([[1,2,1,2],\n [3,4,3,4],\n [1,2,1,2],\n [3,4,3,4]])\n Bresult = array([[5,6,5,6],\n [7,8,7,8],\n [5,6,5,6],\n [7,8,7,8]])\n mixresult = array([[1,2,5,6],\n [3,4,7,8],\n [5,6,1,2],\n [7,8,3,4]])\n assert all(bmat(\"A,A;A,A\") == Aresult)\n assert all(bmat(\"A,A;A,A\",ldict={'A':B}) == Aresult)\n assert_raises(TypeError, bmat, \"A,A;A,A\",gdict={'A':B})\n assert all(bmat(\"A,A;A,A\",ldict={'A':A},gdict={'A':B}) == Aresult)\n b2 = bmat(\"A,B;C,D\",ldict={'A':A,'B':B},gdict={'C':B,'D':A})\n assert all(b2 == mixresult)\n\n\nclass TestProperties(TestCase):\n def test_sum(self):\n \"\"\"Test whether matrix.sum(axis=1) preserves orientation.\n Fails in NumPy <= 0.9.6.2127.\n \"\"\"\n M = matrix([[1,2,0,0],\n [3,4,0,0],\n [1,2,1,2],\n [3,4,3,4]])\n sum0 = matrix([8,12,4,6])\n sum1 = matrix([3,7,6,14]).T\n sumall = 30\n assert_array_equal(sum0, M.sum(axis=0))\n assert_array_equal(sum1, M.sum(axis=1))\n assert sumall == M.sum()\n\n\n def test_prod(self):\n x = matrix([[1,2,3],[4,5,6]])\n assert x.prod() == 720\n assert all(x.prod(0) == matrix([[4,10,18]]))\n assert all(x.prod(1) == matrix([[6],[120]]))\n\n y = matrix([0,1,3])\n assert y.prod() == 0\n\n def test_max(self):\n x = matrix([[1,2,3],[4,5,6]])\n assert x.max() == 6\n assert all(x.max(0) == matrix([[4,5,6]]))\n assert all(x.max(1) == matrix([[3],[6]]))\n\n def test_min(self):\n x = matrix([[1,2,3],[4,5,6]])\n assert x.min() == 1\n assert all(x.min(0) == matrix([[1,2,3]]))\n assert all(x.min(1) == matrix([[1],[4]]))\n\n def test_ptp(self):\n x = np.arange(4).reshape((2,2))\n assert x.ptp() == 3\n assert all(x.ptp(0) == array([2, 2]))\n assert all(x.ptp(1) == array([1, 1]))\n\n def test_var(self):\n x = np.arange(9).reshape((3,3))\n mx = x.view(np.matrix)\n assert_equal(x.var(ddof=0), mx.var(ddof=0))\n 
assert_equal(x.var(ddof=1), mx.var(ddof=1))\n\n def test_basic(self):\n import numpy.linalg as linalg\n\n A = array([[1., 2.],\n [3., 4.]])\n mA = matrix(A)\n assert allclose(linalg.inv(A), mA.I)\n assert all(array(transpose(A) == mA.T))\n assert all(array(transpose(A) == mA.H))\n assert all(A == mA.A)\n\n B = A + 2j*A\n mB = matrix(B)\n assert allclose(linalg.inv(B), mB.I)\n assert all(array(transpose(B) == mB.T))\n assert all(array(conjugate(transpose(B)) == mB.H))\n\n def test_pinv(self):\n x = matrix(arange(6).reshape(2,3))\n xpinv = matrix([[-0.77777778, 0.27777778],\n [-0.11111111, 0.11111111],\n [ 0.55555556, -0.05555556]])\n assert_almost_equal(x.I, xpinv)\n\n def test_comparisons(self):\n A = arange(100).reshape(10,10)\n mA = matrix(A)\n mB = matrix(A) + 0.1\n assert all(mB == A+0.1)\n assert all(mB == matrix(A+0.1))\n assert not any(mB == matrix(A-0.1))\n assert all(mA < mB)\n assert all(mA <= mB)\n assert all(mA <= mA)\n assert not any(mA < mA)\n\n assert not any(mB < mA)\n assert all(mB >= mA)\n assert all(mB >= mB)\n assert not any(mB > mB)\n\n assert all(mA == mA)\n assert not any(mA == mB)\n assert all(mB != mA)\n\n assert not all(abs(mA) > 0)\n assert all(abs(mB > 0))\n\n def test_asmatrix(self):\n A = arange(100).reshape(10,10)\n mA = asmatrix(A)\n A[0,0] = -10\n assert A[0,0] == mA[0,0]\n\n def test_noaxis(self):\n A = matrix([[1,0],[0,1]])\n assert A.sum() == matrix(2)\n assert A.mean() == matrix(0.5)\n\n def test_repr(self):\n A = matrix([[1,0],[0,1]])\n assert repr(A) == \"matrix([[1, 0],\\n [0, 1]])\"\n\nclass TestCasting(TestCase):\n def test_basic(self):\n A = arange(100).reshape(10,10)\n mA = matrix(A)\n\n mB = mA.copy()\n O = ones((10,10), float64) * 0.1\n mB = mB + O\n assert mB.dtype.type == float64\n assert all(mA != mB)\n assert all(mB == mA+0.1)\n\n mC = mA.copy()\n O = ones((10,10), complex128)\n mC = mC * O\n assert mC.dtype.type == complex128\n assert all(mA != mB)\n\n\nclass TestAlgebra(TestCase):\n def test_basic(self):\n import numpy.linalg as linalg\n\n A = array([[1., 2.],\n [3., 4.]])\n mA = matrix(A)\n\n B = identity(2)\n for i in xrange(6):\n assert allclose((mA ** i).A, B)\n B = dot(B, A)\n\n Ainv = linalg.inv(A)\n B = identity(2)\n for i in xrange(6):\n assert allclose((mA ** -i).A, B)\n B = dot(B, Ainv)\n\n assert allclose((mA * mA).A, dot(A, A))\n assert allclose((mA + mA).A, (A + A))\n assert allclose((3*mA).A, (3*A))\n\n mA2 = matrix(A)\n mA2 *= 3\n assert allclose(mA2.A, 3*A)\n\n def test_pow(self):\n \"\"\"Test raising a matrix to an integer power works as expected.\"\"\"\n m = matrix(\"1. 2.; 3. 
4.\")\n m2 = m.copy()\n m2 **= 2\n mi = m.copy()\n mi **= -1\n m4 = m2.copy()\n m4 **= 2\n assert_array_almost_equal(m2, m**2)\n assert_array_almost_equal(m4, np.dot(m2, m2))\n assert_array_almost_equal(np.dot(mi, m), np.eye(2))\n\n def test_notimplemented(self):\n '''Check that 'not implemented' operations produce a failure.'''\n A = matrix([[1., 2.],\n [3., 4.]])\n\n # __rpow__\n try:\n 1.0**A\n except TypeError:\n pass\n else:\n self.fail(\"matrix.__rpow__ doesn't raise a TypeError\")\n\n # __mul__ with something not a list, ndarray, tuple, or scalar\n try:\n A*object()\n except TypeError:\n pass\n else:\n self.fail(\"matrix.__mul__ with non-numeric object doesn't raise\"\n \"a TypeError\")\n\nclass TestMatrixReturn(TestCase):\n def test_instance_methods(self):\n a = matrix([1.0], dtype='f8')\n methodargs = {\n 'astype' : ('intc',),\n 'clip' : (0.0, 1.0),\n 'compress' : ([1],),\n 'repeat' : (1,),\n 'reshape' : (1,),\n 'swapaxes' : (0,0),\n 'dot': np.array([1.0]),\n }\n excluded_methods = [\n 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',\n 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',\n 'searchsorted', 'setflags', 'setfield', 'sort', 'take',\n 'tofile', 'tolist', 'tostring', 'all', 'any', 'sum',\n 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',\n 'prod', 'std', 'ctypes', 'itemset', 'setasflat'\n ]\n for attrib in dir(a):\n if attrib.startswith('_') or attrib in excluded_methods:\n continue\n f = getattr(a, attrib)\n if callable(f):\n # reset contents of a\n a.astype('f8')\n a.fill(1.0)\n if attrib in methodargs:\n args = methodargs[attrib]\n else:\n args = ()\n b = f(*args)\n assert type(b) is matrix, \"%s\" % attrib\n assert type(a.real) is matrix\n assert type(a.imag) is matrix\n c,d = matrix([0.0]).nonzero()\n assert type(c) is matrix\n assert type(d) is matrix\n\n\nclass TestIndexing(TestCase):\n def test_basic(self):\n x = asmatrix(zeros((3,2),float))\n y = zeros((3,1),float)\n y[:,0] = [0.8,0.2,0.3]\n x[:,1] = y>0.5\n assert_equal(x, [[0,1],[0,0],[0,0]])\n\n\nclass TestNewScalarIndexing(TestCase):\n def setUp(self):\n self.a = matrix([[1, 2],[3,4]])\n\n def test_dimesions(self):\n a = self.a\n x = a[0]\n assert_equal(x.ndim, 2)\n\n def test_array_from_matrix_list(self):\n a = self.a\n x = array([a, a])\n assert_equal(x.shape, [2,2,2])\n\n def test_array_to_list(self):\n a = self.a\n assert_equal(a.tolist(),[[1, 2], [3, 4]])\n\n def test_fancy_indexing(self):\n a = self.a\n x = a[1, [0,1,0]]\n assert isinstance(x, matrix)\n assert_equal(x, matrix([[3, 4, 3]]))\n x = a[[1,0]]\n assert isinstance(x, matrix)\n assert_equal(x, matrix([[3, 4], [1, 2]]))\n x = a[[[1],[0]],[[1,0],[0,1]]]\n assert isinstance(x, matrix)\n assert_equal(x, matrix([[4, 3], [1, 2]]))\n\n def test_matrix_element(self):\n x = matrix([[1,2,3],[4,5,6]])\n assert_equal(x[0][0],matrix([[1,2,3]]))\n assert_equal(x[0][0].shape,(1,3))\n assert_equal(x[0].shape,(1,3))\n assert_equal(x[:,0].shape,(2,1))\n\n x = matrix(0)\n assert_equal(x[0,0],0)\n assert_equal(x[0],0)\n assert_equal(x[:,0].shape,x.shape)\n\n def test_scalar_indexing(self):\n x = asmatrix(zeros((3,2),float))\n assert_equal(x[0,0],x[0][0])\n\n def test_row_column_indexing(self):\n x = asmatrix(np.eye(2))\n assert_array_equal(x[0,:],[[1,0]])\n assert_array_equal(x[1,:],[[0,1]])\n assert_array_equal(x[:,0],[[1],[0]])\n assert_array_equal(x[:,1],[[0],[1]])\n\n def test_boolean_indexing(self):\n A = arange(6)\n A.shape = (3,2)\n x = asmatrix(A)\n assert_array_equal(x[:,array([True,False])],x[:,0])\n 
assert_array_equal(x[array([True,False,False]),:],x[0,:])\n\n def test_list_indexing(self):\n A = arange(6)\n A.shape = (3,2)\n x = asmatrix(A)\n assert_array_equal(x[:,[1,0]],x[:,::-1])\n assert_array_equal(x[[2,1,0],:],x[::-1,:])\n\nclass TestPower(TestCase):\n def test_returntype(self):\n a = array([[0,1],[0,0]])\n assert type(matrix_power(a, 2)) is ndarray\n a = mat(a)\n assert type(matrix_power(a, 2)) is matrix\n\n def test_list(self):\n assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "\"\"\" Algorithms for clustering : Meanshift, Affinity propagation and spectral\nclustering.\n\n\"\"\"\n# Author: Alexandre Gramfort [email protected]\n# Gael Varoquaux [email protected]\n\n# License: BSD 3 clause\n\nimport numpy as np\nimport warnings\n\nfrom ..base import BaseEstimator, ClusterMixin\nfrom ..utils import as_float_array\nfrom ..metrics import euclidean_distances\n\n\ndef affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,\n damping=0.5, copy=True, verbose=False):\n \"\"\"Perform Affinity Propagation Clustering of data\n\n Parameters\n ----------\n\n S: array [n_samples, n_samples]\n Matrix of similarities between points\n\n preference: array [n_samples,] or float, optional, default: None\n Preferences for each point - points with larger values of\n preferences are more likely to be chosen as exemplars. The number of\n exemplars, i.e. of clusters, is influenced by the input preferences\n value. If the preferences are not passed as arguments, they will be\n set to the median of the input similarities (resulting in a moderate\n number of clusters). For a smaller amount of clusters, this can be set\n to the minimum value of the similarities.\n\n convergence_iter: int, optional, default: 15\n Number of iterations with no change in the number\n of estimated clusters that stops the convergence.\n\n max_iter: int, optional, default: 200\n Maximum number of iterations\n\n damping: float, optional, default: 0.5\n Damping factor between 0.5 and 1.\n\n copy: boolean, optional, default: True\n If copy is False, the affinity matrix is modified inplace by the\n algorithm, for memory efficiency\n\n verbose: boolean, optional, default: False\n The verbosity level\n\n Returns\n -------\n\n cluster_centers_indices: array [n_clusters]\n index of clusters centers\n\n labels : array [n_samples]\n cluster labels for each point\n\n Notes\n -----\n See examples/cluster/plot_affinity_propagation.py for an example.\n\n References\n ----------\n Brendan J. Frey and Delbert Dueck, \"Clustering by Passing Messages\n Between Data Points\", Science Feb. 
2007\n \"\"\"\n S = as_float_array(S, copy=copy)\n n_samples = S.shape[0]\n\n if S.shape[0] != S.shape[1]:\n raise ValueError(\"S must be a square array (shape=%s)\" % repr(S.shape))\n\n if preference is None:\n preference = np.median(S)\n if damping < 0.5 or damping >= 1:\n raise ValueError('damping must be >= 0.5 and < 1')\n\n random_state = np.random.RandomState(0)\n\n # Place preference on the diagonal of S\n S.flat[::(n_samples + 1)] = preference\n\n A = np.zeros((n_samples, n_samples))\n R = np.zeros((n_samples, n_samples)) # Initialize messages\n\n # Remove degeneracies\n S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *\n random_state.randn(n_samples, n_samples))\n\n # Execute parallel affinity propagation updates\n e = np.zeros((n_samples, convergence_iter))\n\n ind = np.arange(n_samples)\n\n for it in range(max_iter):\n # Compute responsibilities\n Rold = R.copy()\n AS = A + S\n\n I = np.argmax(AS, axis=1)\n Y = AS[np.arange(n_samples), I] # np.max(AS, axis=1)\n\n AS[ind, I[ind]] = - np.finfo(np.double).max\n\n Y2 = np.max(AS, axis=1)\n R = S - Y[:, np.newaxis]\n\n R[ind, I[ind]] = S[ind, I[ind]] - Y2[ind]\n\n R = (1 - damping) * R + damping * Rold # Damping\n\n # Compute availabilities\n Aold = A\n Rp = np.maximum(R, 0)\n Rp.flat[::n_samples + 1] = R.flat[::n_samples + 1]\n\n A = np.sum(Rp, axis=0)[np.newaxis, :] - Rp\n\n dA = np.diag(A)\n A = np.minimum(A, 0)\n\n A.flat[::n_samples + 1] = dA\n\n A = (1 - damping) * A + damping * Aold # Damping\n\n # Check for convergence\n E = (np.diag(A) + np.diag(R)) > 0\n e[:, it % convergence_iter] = E\n K = np.sum(E, axis=0)\n\n if it >= convergence_iter:\n se = np.sum(e, axis=1)\n unconverged = (np.sum((se == convergence_iter) + (se == 0))\n != n_samples)\n if (not unconverged and (K > 0)) or (it == max_iter):\n if verbose:\n print(\"Converged after %d iterations.\" % it)\n break\n else:\n if verbose:\n print(\"Did not converge\")\n\n I = np.where(np.diag(A + R) > 0)[0]\n K = I.size # Identify exemplars\n\n if K > 0:\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K) # Identify clusters\n # Refine the final set of exemplars and clusters and return results\n for k in range(K):\n ii = np.where(c == k)[0]\n j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))\n I[k] = ii[j]\n\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K)\n labels = I[c]\n # Reduce labels to a sorted, gapless, list\n cluster_centers_indices = np.unique(labels)\n labels = np.searchsorted(cluster_centers_indices, labels)\n else:\n labels = np.empty((n_samples, 1))\n cluster_centers_indices = None\n labels.fill(np.nan)\n\n return cluster_centers_indices, labels\n\n\n###############################################################################\n\nclass AffinityPropagation(BaseEstimator, ClusterMixin):\n \"\"\"Perform Affinity Propagation Clustering of data\n\n Parameters\n ----------\n damping: float, optional, default: 0.5\n Damping factor between 0.5 and 1.\n\n convergence_iter: int, optional, default: 15\n Number of iterations with no change in the number\n of estimated clusters that stops the convergence.\n\n max_iter: int, optional, default: 200\n Maximum number of iterations\n\n copy: boolean, optional, default: True\n Make a copy of input data.\n\n preference: array [n_samples,] or float, optional, default: None\n Preferences for each point - points with larger values of\n preferences are more likely to be chosen as exemplars. The number\n of exemplars, ie of clusters, is influenced by the input\n preferences value. 
If the preferences are not passed as arguments,\n they will be set to the median of the input similarities.\n\n affinity: string, optional, default=``euclidean``\n Which affinity to use. At the moment ``precomputed`` and\n ``euclidean`` are supported. ``euclidean`` uses the\n negative squared euclidean distance between points.\n\n verbose: boolean, optional, default: False\n Whether to be verbose.\n\n\n Attributes\n ----------\n `cluster_centers_indices_` : array, [n_clusters]\n Indices of cluster centers\n\n `labels_` : array, [n_samples]\n Labels of each point\n\n `affinity_matrix_` : array-like, [n_samples, n_samples]\n Stores the affinity matrix used in ``fit``.\n\n Notes\n -----\n See examples/cluster/plot_affinity_propagation.py for an example.\n\n The algorithmic complexity of affinity propagation is quadratic\n in the number of points.\n\n References\n ----------\n\n Brendan J. Frey and Delbert Dueck, \"Clustering by Passing Messages\n Between Data Points\", Science Feb. 2007\n \"\"\"\n\n def __init__(self, damping=.5, max_iter=200, convergence_iter=15,\n copy=True, preference=None, affinity='euclidean',\n verbose=False):\n\n self.damping = damping\n self.max_iter = max_iter\n self.convergence_iter = convergence_iter\n self.copy = copy\n self.verbose = verbose\n self.preference = preference\n self.affinity = affinity\n\n @property\n def _pairwise(self):\n return self.affinity is \"precomputed\"\n\n def fit(self, X):\n \"\"\" Create affinity matrix from negative euclidean distances, then\n apply affinity propagation clustering.\n\n Parameters\n ----------\n\n X: array [n_samples, n_features] or [n_samples, n_samples]\n Data matrix or, if affinity is ``precomputed``, matrix of\n similarities / affinities.\n \"\"\"\n\n if X.shape[0] == X.shape[1] and not self._pairwise:\n warnings.warn(\"The API of AffinityPropagation has changed.\"\n \"Now ``fit`` constructs an affinity matrix from the\"\n \" data. To use a custom affinity matrix, set \"\n \"``affinity=precomputed``.\")\n if self.affinity is \"precomputed\":\n self.affinity_matrix_ = X\n elif self.affinity is \"euclidean\":\n self.affinity_matrix_ = -euclidean_distances(X, squared=True)\n else:\n raise ValueError(\"Affinity must be 'precomputed' or \"\n \"'euclidean'. Got %s instead\"\n % str(self.affinity))\n\n self.cluster_centers_indices_, self.labels_ = affinity_propagation(\n self.affinity_matrix_, self.preference, max_iter=self.max_iter,\n convergence_iter=self.convergence_iter, damping=self.damping,\n copy=self.copy, verbose=self.verbose)\n return self\n" ]
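The last file in the code column above is scikit-learn's affinity propagation module, which defines the AffinityPropagation estimator. A minimal usage sketch of that estimator follows; the constructor parameters mirror the __init__ signature shown in the dumped source, while the toy data, the RandomState seed, and the printed slices are illustrative assumptions and not part of the dataset row.

import numpy as np
from sklearn.cluster import AffinityPropagation

# Toy data (illustrative only): two loose groups of 2-D points.
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])

# Parameters follow the __init__ signature in the dumped module;
# the 'euclidean' affinity builds -||x - y||^2 internally, as in its fit().
ap = AffinityPropagation(damping=0.5, affinity='euclidean', preference=None)
ap.fit(X)

print(ap.cluster_centers_indices_)  # indices of the chosen exemplars
print(ap.labels_[:10])              # cluster label assigned to each sample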
[ [ "sklearn.utils.extmath.randomized_svd", "sklearn.utils.extmath.make_nonnegative", "sklearn.externals.six.with_metaclass", "numpy.log", "scipy.sparse.issparse", "sklearn.utils.validation.check_arrays", "sklearn.utils.validation.assert_all_finite", "numpy.isnan", "sklearn.utils.extmath.safe_sparse_dot", "sklearn.cluster.KMeans", "sklearn.utils.arpack.svds", "numpy.log2", "sklearn.utils.extmath.norm", "numpy.apply_along_axis", "numpy.argsort", "sklearn.cluster.MiniBatchKMeans", "scipy.sparse.dia_matrix", "numpy.vstack" ], [ "numpy.log", "numpy.minimum", "scipy.sparse.issparse", "numpy.asarray", "numpy.ascontiguousarray", "numpy.ones", "numpy.mean", "numpy.argmin", "numpy.iinfo", "numpy.where", "numpy.var", "numpy.zeros", "numpy.sum", "numpy.empty" ], [ "numpy.logical_not", "numpy.arange", "numpy.ones", "numpy.argmax", "numpy.where", "numpy.ravel", "numpy.zeros", "numpy.sum" ], [ "numpy.unique", "numpy.asarray", "numpy.setdiff1d", "numpy.flatnonzero", "numpy.intersect1d", "numpy.searchsorted", "numpy.array" ], [ "numpy.intp", "numpy.int64", "numpy.int_" ], [ "numpy.zeros", "numpy.sum", "numpy.min" ], [ "sklearn.utils.testing.assert_array_almost_equal", "sklearn.cross_validation.train_test_split", "sklearn.datasets.make_classification", "sklearn.utils.testing.assert_raises", "sklearn.utils.testing.all_estimators", "sklearn.base.clone", "numpy.int", "sklearn.utils.testing.assert_true", "numpy.mean", "numpy.any", "sklearn.datasets.load_boston", "sklearn.metrics.adjusted_rand_score", "sklearn.metrics.f1_score", "sklearn.datasets.make_blobs", "sklearn.lda.LDA", "numpy.unique", "sklearn.utils.testing.assert_greater", "numpy.argmax", "sklearn.utils.testing.set_random_state", "sklearn.datasets.load_iris", "scipy.sparse.csr_matrix", "sklearn.utils.testing.assert_array_equal", "numpy.random.RandomState", "numpy.sum", "sklearn.utils.testing.assert_equal", "sklearn.utils.shuffle", "numpy.ones", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.accuracy_score" ], [ "scipy.sparse.issparse", "numpy.isfinite", "numpy.ascontiguousarray", "numpy.asarray", "numpy.atleast_2d", "numpy.shape", "numpy.ravel", "numpy.array", "numpy.random.RandomState" ], [ "numpy.matrix", "numpy.dot", "numpy.linalg.inv", "numpy.arange", "numpy.eye", "numpy.matrixlib.defmatrix.matrix_power", "numpy.bmat", "numpy.asmatrix", "numpy.matrixlib.mat", "numpy.array" ], [ "numpy.diag", "numpy.maximum", "numpy.minimum", "numpy.unique", "numpy.arange", "numpy.median", "numpy.finfo", "numpy.max", "numpy.argmax", "numpy.where", "numpy.searchsorted", "numpy.random.RandomState", "numpy.zeros", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sailxjx/DI-engine
[ "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00", "c6763f8e2ba885a2a02f611195a1b5f8b50bff00" ]
[ "ding/utils/time_helper.py", "ding/hpc_rl/tests/test_lstm.py", "ding/model/template/ppg.py", "dizoo/overcooked/envs/overcooked_env.py", "ding/policy/a2c.py", "ding/rl_utils/tests/test_ppg.py", "ding/policy/cql.py" ]
[ "import signal\nimport time\nfrom typing import Any, Callable\n\nimport torch\nfrom easydict import EasyDict\nfrom .time_helper_base import TimeWrapper\nfrom .time_helper_cuda import get_cuda_time_wrapper\n\n\ndef build_time_helper(cfg: EasyDict = None, wrapper_type: str = None) -> Callable[[], 'TimeWrapper']:\n r\"\"\"\n Overview:\n Build the timehelper\n\n Arguments:\n - cfg (:obj:`dict`):\n The config file, which is a multilevel dict, have large domain like\n evaluate, common, model, train etc, and each large domain\n has it's smaller domain.\n - wrapper_type (:obj:`str`): The type of wrapper returned, support ``['time', 'cuda']``\n\n Returns:\n - time_wrapper (:obj:`TimeWrapper`):\n Return the corresponding timewrapper, Reference: ``ding.utils.timehelper.TimeWrapperTime``\n and ``ding.utils.timehelper.get_cuda_time_wrapper``.\n \"\"\"\n # Note: wrapper_type has higher priority\n if wrapper_type is not None:\n time_wrapper_type = wrapper_type\n elif cfg is not None:\n time_wrapper_type = cfg.common.time_wrapper_type\n else:\n raise RuntimeError('Either wrapper_type or cfg should be provided.')\n\n if time_wrapper_type == 'time':\n return TimeWrapperTime\n elif time_wrapper_type == 'cuda':\n if torch.cuda.is_available():\n # lazy initialize to make code runnable locally\n return get_cuda_time_wrapper()\n else:\n return TimeWrapperTime\n else:\n raise KeyError('invalid time_wrapper_type: {}'.format(time_wrapper_type))\n\n\nclass EasyTimer:\n r\"\"\"\n Overview:\n A decent timer wrapper that can be used easily.\n\n Interface:\n ``__init__``, ``__enter__``, ``__exit__``\n\n Example:\n >>> wait_timer = EasyTimer()\n >>> with wait_timer:\n >>> func(...)\n >>> time_ = wait_timer.value # in second\n \"\"\"\n\n def __init__(self, cuda=True):\n r\"\"\"\n Overview:\n Init class EasyTimer\n\n Arguments:\n - cuda (:obj:`bool`): Whether to build timer with cuda type\n \"\"\"\n if torch.cuda.is_available() and cuda:\n time_wrapper_type = \"cuda\"\n else:\n time_wrapper_type = \"time\"\n self._timer = build_time_helper(wrapper_type=time_wrapper_type)\n self.value = 0.0\n\n def __enter__(self):\n r\"\"\"\n Overview:\n Enter timer, start timing\n \"\"\"\n self.value = 0.0\n self._timer.start_time()\n\n def __exit__(self, *args):\n r\"\"\"\n Overview:\n Exit timer, stop timing\n \"\"\"\n self.value = self._timer.end_time()\n\n\nclass TimeWrapperTime(TimeWrapper):\n r\"\"\"\n Overview:\n A class method that inherit from ``TimeWrapper`` class\n\n Interface:\n ``start_time``, ``end_time``\n \"\"\"\n\n # overwrite\n @classmethod\n def start_time(cls):\n r\"\"\"\n Overview:\n Implement and overide the ``start_time`` method in ``TimeWrapper`` class\n \"\"\"\n cls.start = time.time()\n\n # overwrite\n @classmethod\n def end_time(cls):\n r\"\"\"\n Overview:\n Implement and overide the end_time method in ``TimeWrapper`` class\n\n Returns:\n - time(:obj:`float`): The time between ``start_time`` and end_time\n \"\"\"\n cls.end = time.time()\n return cls.end - cls.start\n\n\nclass WatchDog(object):\n \"\"\"\n Overview:\n Simple watchdog timer to detect timeouts\n\n Arguments:\n - timeout (:obj:`int`): Timeout value of the ``watchdog [seconds]``.\n\n .. note::\n If it is not reset before exceeding this value, ``TimeourError`` raised.\n\n Interface:\n ``start``, ``stop``\n\n Examples:\n >>> watchdog = WatchDog(x) # x is a timeout value\n >>> ...\n >>> watchdog.start()\n >>> ... 
# Some function\n\n \"\"\"\n\n def __init__(self, timeout: int = 1):\n self._timeout = timeout + 1\n self._failed = False\n\n def start(self):\n r\"\"\"\n Overview:\n Start watchdog.\n \"\"\"\n signal.signal(signal.SIGALRM, self._event)\n signal.alarm(self._timeout)\n\n @staticmethod\n def _event(signum: Any, frame: Any):\n raise TimeoutError()\n\n def stop(self):\n r\"\"\"\n Overview:\n Stop watchdog with ``alarm(0)``, ``SIGALRM``, and ``SIG_DFL`` signals.\n \"\"\"\n signal.alarm(0)\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n", "import time\nimport torch\nfrom hpc_rll.origin.rnn import get_lstm\nfrom hpc_rll.torch_utils.network.rnn import LSTM\nfrom testbase import mean_relative_error, times\n\nassert torch.cuda.is_available()\nuse_cuda = True\n\nseq_len = 64\nbatch_size = 3\ninput_size = 1792\nhidden_size = 384\nnum_layers = 3\nnorm_type = 'LN'\ndropout = 0 # 0.1\n\n\n# Note: need open load_params for hpc_lstm to validation\n# Note: only used to case of num_layers = 3\ndef lstm_val():\n ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)\n hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n\n ori_x = torch.randn(seq_len, batch_size, input_size)\n ori_h0 = torch.randn(num_layers, batch_size, hidden_size)\n ori_c0 = torch.randn(num_layers, batch_size, hidden_size)\n\n if use_cuda:\n ori_x = ori_x.cuda()\n ori_h0 = ori_h0.cuda()\n ori_c0 = ori_c0.cuda()\n ori_lstm = ori_lstm.cuda()\n hpc_lstm = hpc_lstm.cuda()\n\n ori_x.requires_grad_(True)\n ori_output, ori_next_state = ori_lstm(ori_x, [ori_h0, ori_c0])\n ori_loss = ori_output.mean()\n ori_loss.backward()\n\n hpc_x = ori_x.clone().detach()\n hpc_h0 = ori_h0.clone().detach()\n hpc_c0 = ori_c0.clone().detach()\n hpc_x.requires_grad_(True)\n hpc_output, hpc_next_state = hpc_lstm(hpc_x, [hpc_h0, hpc_c0])\n hpc_loss = hpc_output.mean()\n hpc_loss.backward()\n torch.cuda.synchronize()\n\n mre = mean_relative_error(\n torch.flatten(ori_loss).cpu().detach().numpy(),\n torch.flatten(hpc_loss).cpu().detach().numpy()\n )\n print(\"lstm fp mean_relative_error: \" + str(mre))\n mre = mean_relative_error(\n torch.flatten(ori_x.grad).cpu().detach().numpy(),\n torch.flatten(hpc_x.grad).cpu().detach().numpy()\n )\n print(\"lstm bp mean_relative_error: \" + str(mre))\n\n ori_wx_grad = torch.cat((ori_lstm.wx[0].grad, ori_lstm.wx[1].grad, ori_lstm.wx[2].grad))\n hpc_wx_grad = hpc_lstm.wx.grad\n mre = mean_relative_error(torch.flatten(ori_wx_grad).cpu().numpy(), torch.flatten(hpc_wx_grad).cpu().numpy())\n print(\"wx grad mean_relative_error: \" + str(mre))\n\n ori_wh_grad = torch.cat((ori_lstm.wh[0].grad, ori_lstm.wh[1].grad, ori_lstm.wh[2].grad))\n hpc_wh_grad = hpc_lstm.wh.grad\n mre = mean_relative_error(torch.flatten(ori_wh_grad).cpu().numpy(), torch.flatten(hpc_wh_grad).cpu().numpy())\n print(\"wh grad mean_relative_error: \" + str(mre))\n\n ori_bias_grad = ori_lstm.bias.grad\n hpc_bias_grad = hpc_lstm.bias.grad\n mre = mean_relative_error(torch.flatten(ori_bias_grad).cpu().numpy(), torch.flatten(hpc_bias_grad).cpu().numpy())\n print(\"bias grad mean_relative_error: \" + str(mre))\n\n params = list(ori_lstm.parameters())\n gamma_0_x = params[1]\n beta_0_x = params[2]\n gamma_0_h = params[3]\n beta_0_h = params[4]\n gamma_1_x = params[5]\n beta_1_x = params[6]\n gamma_1_h = params[7]\n beta_1_h = params[8]\n gamma_2_x = params[9]\n beta_2_x = params[10]\n gamma_2_h = params[11]\n beta_2_h = params[12]\n ori_gamma_grad = torch.cat(\n (gamma_0_x.grad, gamma_0_h.grad, 
gamma_1_x.grad, gamma_1_h.grad, gamma_2_x.grad, gamma_2_h.grad)\n )\n ori_beta_grad = torch.cat(\n (beta_0_x.grad, beta_0_h.grad, beta_1_x.grad, beta_1_h.grad, beta_2_x.grad, beta_2_h.grad)\n )\n hpc_gamma_grad = hpc_lstm.ln_gamma.grad\n hpc_beta_grad = hpc_lstm.ln_beta.grad\n mre = mean_relative_error(torch.flatten(ori_gamma_grad).cpu().numpy(), torch.flatten(hpc_gamma_grad).cpu().numpy())\n print(\"ln gamma grad mean_relative_error: \" + str(mre))\n mre = mean_relative_error(torch.flatten(ori_beta_grad).cpu().numpy(), torch.flatten(hpc_beta_grad).cpu().numpy())\n print(\"ln beta grad mean_relative_error: \" + str(mre))\n\n\ndef lstm_perf():\n ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)\n hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n\n lstms = {'normal': ori_lstm, 'hpc': hpc_lstm}\n\n for lstm_type, lstm in lstms.items():\n x = torch.rand(seq_len, batch_size, input_size)\n h0 = torch.randn(num_layers, batch_size, hidden_size)\n c0 = torch.randn(num_layers, batch_size, hidden_size)\n if use_cuda:\n x = x.cuda()\n h0 = h0.cuda()\n c0 = c0.cuda()\n lstm = lstm.cuda()\n\n prev_state = [h0, c0]\n x.requires_grad_(True)\n for i in range(times):\n t = time.time()\n output, _ = lstm(x, prev_state)\n loss = output.mean()\n loss.backward()\n if use_cuda:\n torch.cuda.synchronize()\n print('epoch: {}, {} lstm cost time: {}'.format(i, lstm_type, time.time() - t))\n\n\nif __name__ == '__main__':\n print(\n \"target problem: seq_len = {}, batch_size = {}, input_size = {}, hidden_size = {}, num_layers = {}, norm_type = {}, dropout = {}\" # noqa\n .format(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)\n )\n print(\"==============lstm has no validation test================\")\n #print(\"===============run lstm validation test==================\")\n #lstm_val()\n print(\"===============run lstm performance test=================\")\n lstm_perf()\n", "from typing import Optional, Dict, Union\nimport copy\nimport torch\nimport torch.nn as nn\nfrom ding.utils import SequenceType, MODEL_REGISTRY\nfrom .vac import VAC\n\n\n@MODEL_REGISTRY.register('ppg')\nclass PPG(nn.Module):\n mode = ['compute_actor', 'compute_critic', 'compute_actor_critic']\n\n def __init__(\n self,\n obs_shape: Union[int, SequenceType],\n action_shape: Union[int, SequenceType],\n share_encoder: bool = True,\n continuous: bool = False,\n encoder_hidden_size_list: SequenceType = [128, 128, 64],\n actor_head_hidden_size: int = 64,\n actor_head_layer_num: int = 2,\n critic_head_hidden_size: int = 64,\n critic_head_layer_num: int = 1,\n activation: Optional[nn.Module] = nn.ReLU(),\n norm_type: Optional[str] = None,\n ) -> None:\n super(PPG, self).__init__()\n self.actor_critic = VAC(\n obs_shape, action_shape, share_encoder, continuous, encoder_hidden_size_list, actor_head_hidden_size,\n actor_head_layer_num, critic_head_hidden_size, critic_head_layer_num, activation, norm_type\n )\n self.aux_critic = copy.deepcopy(self.actor_critic.critic)\n\n def forward(self, inputs: Union[torch.Tensor, Dict], mode: str) -> Dict:\n assert mode in self.mode, \"not support forward mode: {}/{}\".format(mode, self.mode)\n return getattr(self, mode)(inputs)\n\n def compute_actor(self, x: torch.Tensor) -> Dict:\n \"\"\"\n ReturnsKeys:\n - necessary: ``logit``\n \"\"\"\n return self.actor_critic(x, mode='compute_actor')\n\n def compute_critic(self, x: torch.Tensor) -> Dict:\n \"\"\"\n ReturnsKeys:\n - necessary: ``value``\n \"\"\"\n x = 
self.aux_critic[0](x) # encoder\n x = self.aux_critic[1](x) # head\n return {'value': x['pred']}\n\n def compute_actor_critic(self, x: torch.Tensor) -> Dict:\n \"\"\"\n .. note::\n\n ``compute_actor_critic`` interface aims to save computation when shares encoder\n ReturnsKeys:\n - necessary: ``value``, ``logit``\n \"\"\"\n return self.actor_critic(x, mode='compute_actor_critic')\n", "from namedlist import namedlist\nimport numpy as np\nimport gym\nfrom typing import Any, Union, List\nimport copy\n\nfrom overcooked_ai_py.mdp.actions import Action, Direction\nfrom overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\nfrom overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n\nfrom ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo\nfrom ding.envs.common.env_element import EnvElement, EnvElementInfo\nfrom ding.utils import ENV_REGISTRY\n\nOvercookEnvTimestep = namedlist('OvercookEnvTimestep', ['obs', 'reward', 'done', 'info'])\nOvercookEnvInfo = namedlist('OvercookEnvInfo', ['agent_num', 'obs_space', 'act_space', 'rew_space'])\n\n# n, s = Direction.NORTH, Direction.SOUTH\n# e, w = Direction.EAST, Direction.WEST\n# stay, interact = Action.STAY, Action.INTERACT\n# Action.ALL_ACTIONS: [n, s, e, w, stay, interact]\n\n\n@ENV_REGISTRY.register('overcooked')\nclass OvercookEnv(BaseEnv):\n\n def __init__(self, cfg) -> None:\n self._cfg = cfg\n self._env_name = cfg.get(\"env_name\", \"cramped_room\")\n self._horizon = cfg.get(\"horizon\", 400)\n self._concat_obs = cfg.get(\"concat_obs\", False)\n self._action_mask = cfg.get(\"action_mask\", True)\n self._use_shaped_reward = cfg.get(\"use_shaped_reward\", True)\n self.mdp = OvercookedGridworld.from_layout_name(self._env_name)\n self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0)\n featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state)\n self.featurize_fn = featurize_fn\n self.action_dim = len(Action.ALL_ACTIONS)\n self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))\n # rightnow overcook environment encoding only support 2 agent game\n self.agent_num = 2\n # set up obs shape\n dummy_mdp = self.base_env.mdp\n dummy_state = dummy_mdp.get_standard_start_state()\n self.obs_shape = self.featurize_fn(dummy_mdp, dummy_state)[0].shape\n\n def seed(self, seed: int, dynamic_seed: bool = True) -> None:\n self._seed = seed\n self._dynamic_seed = dynamic_seed\n np.random.seed(self._seed)\n\n def close(self) -> None:\n # Note: the real env instance only has a empty close method, only pas\n pass\n\n def step(self, action):\n if isinstance(action, list):\n action = np.concatenate(action)\n assert all(self.action_space.contains(a) for a in action), \"%r (%s) invalid\" % (action, type(action))\n agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action]\n\n if self.agent_idx == 0:\n joint_action = (agent_action, other_agent_action)\n else:\n joint_action = (other_agent_action, agent_action)\n\n next_state, reward, done, env_info = self.base_env.step(joint_action)\n if self._use_shaped_reward:\n reward += env_info['shaped_r_by_agent'][0]\n reward += env_info['shaped_r_by_agent'][1]\n\n reward = np.array([float(reward)])\n self._final_eval_reward += reward\n ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state)\n if self.agent_idx == 0:\n both_agents_ob = [ob_p0, ob_p1]\n else:\n both_agents_ob = [ob_p1, ob_p0]\n if self._concat_obs:\n both_agents_ob = np.concatenate(both_agents_ob)\n else:\n 
both_agents_ob = np.stack(both_agents_ob)\n\n env_info[\"policy_agent_idx\"] = self.agent_idx\n env_info[\"final_eval_reward\"] = self._final_eval_reward\n\n action_mask = self.get_action_mask()\n if self._action_mask:\n obs = {\n \"agent_state\": both_agents_ob,\n \"overcooked_state\": self.base_env.state,\n \"other_agent_env_idx\": 1 - self.agent_idx,\n \"action_mask\": action_mask\n }\n else:\n obs = both_agents_ob\n return OvercookEnvTimestep(obs, reward, done, env_info)\n\n def reset(self):\n self.base_env.reset()\n self._final_eval_reward = 0\n self.mdp = self.base_env.mdp\n # random init agent index\n self.agent_idx = np.random.choice([0, 1])\n ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state)\n\n if self.agent_idx == 0:\n both_agents_ob = [ob_p0, ob_p1]\n else:\n both_agents_ob = [ob_p1, ob_p0]\n if self._concat_obs:\n both_agents_ob = np.concatenate(both_agents_ob)\n else:\n both_agents_ob = np.stack(both_agents_ob)\n\n action_mask = self.get_action_mask()\n\n if self._action_mask:\n obs = {\n \"agent_state\": both_agents_ob,\n \"overcooked_state\": self.base_env.state,\n \"other_agent_env_idx\": 1 - self.agent_idx,\n \"action_mask\": action_mask\n }\n else:\n obs = both_agents_ob\n return obs\n\n def get_available_actions(self):\n return self.mdp.get_actions(self.base_env.state)\n\n def get_action_mask(self):\n available_actions = self.get_available_actions()\n\n action_masks = np.zeros((2, self.action_dim))\n\n for i in range(self.action_dim):\n if Action.INDEX_TO_ACTION[i] in available_actions[0]:\n action_masks[0][i] = 1\n if Action.INDEX_TO_ACTION[i] in available_actions[1]:\n action_masks[1][i] = 1\n\n return action_masks\n\n def info(self):\n T = EnvElementInfo\n if self._concat_obs:\n agent_state = list(self.obs_shape)\n agent_state[0] = agent_state[0] * 2\n agent_state = tuple(agent_state)\n else:\n agent_state = (self.agent_num, self.obs_shape)\n env_info = OvercookEnvInfo(\n agent_num=self.agent_num,\n obs_space=T({\n 'agent_state': agent_state,\n 'action_mask': (self.agent_num, self.action_dim),\n }, None),\n act_space=T((self.agent_num, self.action_dim), None),\n rew_space=T((1, ), None)\n )\n return env_info\n\n def __repr__(self):\n pass\n\n\n@ENV_REGISTRY.register('overcooked_game')\nclass OvercookGameEnv(BaseEnv):\n\n def __init__(self, cfg) -> None:\n self._cfg = cfg\n self._env_name = cfg.get(\"env_name\", \"cramped_room\")\n self._horizon = cfg.get(\"horizon\", 400)\n self._concat_obs = cfg.get(\"concat_obs\", False)\n self._action_mask = cfg.get(\"action_mask\", False)\n self._use_shaped_reward = cfg.get(\"use_shaped_reward\", True)\n self.mdp = OvercookedGridworld.from_layout_name(self._env_name)\n self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0)\n featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state)\n self.featurize_fn = featurize_fn\n self.action_dim = len(Action.ALL_ACTIONS)\n self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS))\n # rightnow overcook environment encoding only support 2 agent game\n self.agent_num = 2\n # set up obs shape\n dummy_mdp = self.base_env.mdp\n dummy_state = dummy_mdp.get_standard_start_state()\n self.obs_shape = self.featurize_fn(dummy_mdp, dummy_state)[0].shape\n\n def seed(self, seed: int, dynamic_seed: bool = True) -> None:\n self._seed = seed\n self._dynamic_seed = dynamic_seed\n np.random.seed(self._seed)\n\n def close(self) -> None:\n # Note: the real env instance only has a empty close method, only pas\n pass\n\n def step(self, action):\n if 
isinstance(action, list):\n action = np.array(action).astype(np.int)\n if action.shape == (2, 1):\n action = [action[0][0], action[1][0]]\n assert all(self.action_space.contains(a) for a in action), \"%r (%s) invalid\" % (action, type(action))\n agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action]\n\n if self.agent_idx == 0:\n joint_action = (agent_action, other_agent_action)\n else:\n joint_action = (other_agent_action, agent_action)\n\n next_state, reward, done, env_info = self.base_env.step(joint_action)\n\n reward = np.array([float(reward)])\n self._final_eval_reward += reward\n if self._use_shaped_reward:\n self._final_eval_reward += env_info['shaped_r_by_agent'][0]\n self._final_eval_reward += env_info['shaped_r_by_agent'][1]\n rewards = np.array([reward, reward]).astype(np.float32)\n if self._use_shaped_reward:\n rewards[0] += env_info['shaped_r_by_agent'][0]\n rewards[1] += env_info['shaped_r_by_agent'][1]\n ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state)\n if self.agent_idx == 0:\n both_agents_ob = [ob_p0, ob_p1]\n else:\n both_agents_ob = [ob_p1, ob_p0]\n if self._concat_obs:\n both_agents_ob = np.concatenate(both_agents_ob)\n else:\n both_agents_ob = np.stack(both_agents_ob)\n\n env_info[\"policy_agent_idx\"] = self.agent_idx\n env_info[\"final_eval_reward\"] = self._final_eval_reward\n\n action_mask = self.get_action_mask()\n if self._action_mask:\n obs = {\n \"agent_state\": both_agents_ob,\n \"overcooked_state\": self.base_env.state,\n \"other_agent_env_idx\": 1 - self.agent_idx,\n \"action_mask\": action_mask\n }\n else:\n obs = both_agents_ob\n return OvercookEnvTimestep(obs, rewards, done, [env_info, env_info])\n\n def reset(self):\n self.base_env.reset()\n self._final_eval_reward = 0\n self.mdp = self.base_env.mdp\n # random init agent index\n self.agent_idx = np.random.choice([0, 1])\n #fix init agent index\n self.agent_idx = 0\n ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state)\n\n if self.agent_idx == 0:\n both_agents_ob = [ob_p0, ob_p1]\n else:\n both_agents_ob = [ob_p1, ob_p0]\n if self._concat_obs:\n both_agents_ob = np.concatenate(both_agents_ob)\n else:\n both_agents_ob = np.stack(both_agents_ob)\n\n action_mask = self.get_action_mask()\n\n if self._action_mask:\n obs = {\n \"agent_state\": both_agents_ob,\n \"overcooked_state\": self.base_env.state,\n \"other_agent_env_idx\": 1 - self.agent_idx,\n \"action_mask\": action_mask\n }\n else:\n obs = both_agents_ob\n return obs\n\n def get_available_actions(self):\n return self.mdp.get_actions(self.base_env.state)\n\n def get_action_mask(self):\n available_actions = self.get_available_actions()\n\n action_masks = np.zeros((2, self.action_dim))\n\n for i in range(self.action_dim):\n if Action.INDEX_TO_ACTION[i] in available_actions[0]:\n action_masks[0][i] = 1\n if Action.INDEX_TO_ACTION[i] in available_actions[1]:\n action_masks[1][i] = 1\n\n return action_masks\n\n def info(self):\n T = EnvElementInfo\n if self._concat_obs:\n agent_state = list(self.obs_shape)\n agent_state[0] = agent_state[0] * 2\n agent_state = tuple(agent_state)\n else:\n agent_state = (self.agent_num, self.obs_shape)\n env_info = OvercookEnvInfo(\n agent_num=self.agent_num,\n obs_space=T({\n 'agent_state': agent_state,\n 'action_mask': (self.agent_num, self.action_dim),\n }, None),\n act_space=T((self.agent_num, self.action_dim), None),\n rew_space=T((1, ), None)\n )\n return env_info\n\n def __repr__(self):\n return \"DI-engine Overcooked GameEnv\"", "from typing import List, Dict, Any, Tuple, 
Union\nfrom collections import namedtuple\nimport torch\n\nfrom ding.rl_utils import a2c_data, a2c_error, get_gae_with_default_last_value, get_train_sample\nfrom ding.torch_utils import Adam, to_device\nfrom ding.model import model_wrap\nfrom ding.utils import POLICY_REGISTRY\nfrom ding.utils.data import default_collate, default_decollate\nfrom .base_policy import Policy\nfrom .common_utils import default_preprocess_learn\n\n\n@POLICY_REGISTRY.register('a2c')\nclass A2CPolicy(Policy):\n r\"\"\"\n Overview:\n Policy class of A2C algorithm.\n \"\"\"\n config = dict(\n # (string) RL policy register name (refer to function \"register_policy\").\n type='a2c',\n # (bool) Whether to use cuda for network.\n cuda=False,\n # (bool) whether use on-policy training pipeline(behaviour policy and training policy are the same)\n on_policy=True, # for a2c strictly on policy algorithm, this line should not be seen by users\n priority=False,\n # (bool) Whether use Importance Sampling Weight to correct biased update. If True, priority must be True.\n priority_IS_weight=False,\n learn=dict(\n # (bool) Whether to use multi gpu\n multi_gpu=False,\n # (int) for a2c, update_per_collect must be 1.\n update_per_collect=1, # fixed value, this line should not be modified by users\n batch_size=64,\n learning_rate=0.001,\n # (List[float])\n betas=(0.9, 0.999),\n # (float)\n eps=1e-8,\n # (float)\n grad_norm=0.5,\n # ==============================================================\n # The following configs is algorithm-specific\n # ==============================================================\n # (float) loss weight of the value network, the weight of policy network is set to 1\n value_weight=0.5,\n # (float) loss weight of the entropy regularization, the weight of policy network is set to 1\n entropy_weight=0.01,\n # (bool) Whether to normalize advantage. Default to False.\n adv_norm=False,\n ignore_done=False,\n ),\n collect=dict(\n # (int) collect n_sample data, train model n_iteration times\n # n_sample=80,\n unroll_len=1,\n # ==============================================================\n # The following configs is algorithm-specific\n # ==============================================================\n # (float) discount factor for future reward, defaults int [0, 1]\n discount_factor=0.9,\n # (float) the trade-off factor lambda to balance 1step td and mc\n gae_lambda=0.95,\n ),\n eval=dict(),\n )\n\n def _init_learn(self) -> None:\n r\"\"\"\n Overview:\n Learn mode init method. 
Called by ``self.__init__``.\n Init the optimizer, algorithm config, main and target models.\n \"\"\"\n # Optimizer\n self._optimizer = Adam(\n self._model.parameters(),\n lr=self._cfg.learn.learning_rate,\n betas=self._cfg.learn.betas,\n eps=self._cfg.learn.eps\n )\n\n # Algorithm config\n self._priority = self._cfg.priority\n self._priority_IS_weight = self._cfg.priority_IS_weight\n self._value_weight = self._cfg.learn.value_weight\n self._entropy_weight = self._cfg.learn.entropy_weight\n self._adv_norm = self._cfg.learn.adv_norm\n self._grad_norm = self._cfg.learn.grad_norm\n\n # Main and target models\n self._learn_model = model_wrap(self._model, wrapper_name='base')\n self._learn_model.reset()\n\n def _forward_learn(self, data: dict) -> Dict[str, Any]:\n r\"\"\"\n Overview:\n Forward and backward function of learn mode.\n Arguments:\n - data (:obj:`dict`): Dict type data, including at least ['obs', 'action', 'reward', 'next_obs','adv']\n Returns:\n - info_dict (:obj:`Dict[str, Any]`): Including current lr and loss.\n \"\"\"\n data = default_preprocess_learn(data, ignore_done=self._cfg.learn.ignore_done, use_nstep=False)\n if self._cuda:\n data = to_device(data, self._device)\n self._learn_model.train()\n # forward\n output = self._learn_model.forward(data['obs'], mode='compute_actor_critic')\n\n adv = data['adv']\n return_ = data['value'] + adv\n if self._adv_norm:\n # norm adv in total train_batch\n adv = (adv - adv.mean()) / (adv.std() + 1e-8)\n data = a2c_data(output['logit'], data['action'], output['value'], adv, return_, data['weight'])\n\n # Calculate A2C loss\n a2c_loss = a2c_error(data)\n wv, we = self._value_weight, self._entropy_weight\n total_loss = a2c_loss.policy_loss + wv * a2c_loss.value_loss - we * a2c_loss.entropy_loss\n\n # ====================\n # A2C-learning update\n # ====================\n\n self._optimizer.zero_grad()\n total_loss.backward()\n\n grad_norm = torch.nn.utils.clip_grad_norm_(\n list(self._learn_model.parameters()),\n max_norm=self._grad_norm,\n )\n self._optimizer.step()\n\n # =============\n # after update\n # =============\n return {\n 'cur_lr': self._optimizer.param_groups[0]['lr'],\n 'total_loss': total_loss.item(),\n 'policy_loss': a2c_loss.policy_loss.item(),\n 'value_loss': a2c_loss.value_loss.item(),\n 'entropy_loss': a2c_loss.entropy_loss.item(),\n 'adv_abs_max': adv.abs().max().item(),\n 'grad_norm': grad_norm,\n }\n\n def _state_dict_learn(self) -> Dict[str, Any]:\n return {\n 'model': self._learn_model.state_dict(),\n 'optimizer': self._optimizer.state_dict(),\n }\n\n def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:\n self._learn_model.load_state_dict(state_dict['model'])\n self._optimizer.load_state_dict(state_dict['optimizer'])\n\n def _init_collect(self) -> None:\n r\"\"\"\n Overview:\n Collect mode init method. 
Called by ``self.__init__``.\n Init traj and unroll length, collect model.\n \"\"\"\n\n self._unroll_len = self._cfg.collect.unroll_len\n self._collect_model = model_wrap(self._model, wrapper_name='multinomial_sample')\n self._collect_model.reset()\n # Algorithm\n self._gamma = self._cfg.collect.discount_factor\n self._gae_lambda = self._cfg.collect.gae_lambda\n\n def _forward_collect(self, data: dict) -> dict:\n r\"\"\"\n Overview:\n Forward function of collect mode.\n Arguments:\n - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \\\n values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.\n Returns:\n - output (:obj:`Dict[int, Any]`): Dict type data, including at least inferred action according to input obs.\n ReturnsKeys\n - necessary: ``action``\n \"\"\"\n data_id = list(data.keys())\n data = default_collate(list(data.values()))\n if self._cuda:\n data = to_device(data, self._device)\n self._collect_model.eval()\n with torch.no_grad():\n output = self._collect_model.forward(data, mode='compute_actor_critic')\n if self._cuda:\n output = to_device(output, 'cpu')\n output = default_decollate(output)\n return {i: d for i, d in zip(data_id, output)}\n\n def _process_transition(self, obs: Any, model_output: dict, timestep: namedtuple) -> dict:\n r\"\"\"\n Overview:\n Generate dict type transition data from inputs.\n Arguments:\n - obs (:obj:`Any`): Env observation\n - model_output (:obj:`dict`): Output of collect model, including at least ['action']\n - timestep (:obj:`namedtuple`): Output after env step, including at least ['obs', 'reward', 'done'] \\\n (here 'obs' indicates obs after env step).\n Returns:\n - transition (:obj:`dict`): Dict type transition data.\n \"\"\"\n transition = {\n 'obs': obs,\n 'next_obs': timestep.obs,\n 'action': model_output['action'],\n 'value': model_output['value'],\n 'reward': timestep.reward,\n 'done': timestep.done,\n }\n return transition\n\n def _get_train_sample(self, data: list) -> Union[None, List[Any]]:\n r\"\"\"\n Overview:\n Get the trajectory and the n step return data, then sample from the n_step return data\n Arguments:\n - data (:obj:`list`): The trajectory's buffer list\n Returns:\n - samples (:obj:`dict`): The training samples generated\n \"\"\"\n data = get_gae_with_default_last_value(\n data,\n data[-1]['done'],\n gamma=self._gamma,\n gae_lambda=self._gae_lambda,\n cuda=self._cuda,\n )\n return get_train_sample(data, self._unroll_len)\n\n def _init_eval(self) -> None:\n r\"\"\"\n Overview:\n Evaluate mode init method. 
Called by ``self.__init__``.\n Init eval model with argmax strategy.\n \"\"\"\n self._eval_model = model_wrap(self._model, wrapper_name='argmax_sample')\n self._eval_model.reset()\n\n def _forward_eval(self, data: dict) -> dict:\n r\"\"\"\n Overview:\n Forward function of eval mode, similar to ``self._forward_collect``.\n Arguments:\n - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \\\n values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.\n Returns:\n - output (:obj:`Dict[int, Any]`): The dict of predicting action for the interaction with env.\n ReturnsKeys\n - necessary: ``action``\n \"\"\"\n data_id = list(data.keys())\n data = default_collate(list(data.values()))\n if self._cuda:\n data = to_device(data, self._device)\n self._eval_model.eval()\n with torch.no_grad():\n output = self._eval_model.forward(data, mode='compute_actor')\n if self._cuda:\n output = to_device(output, 'cpu')\n output = default_decollate(output)\n return {i: d for i, d in zip(data_id, output)}\n\n def default_model(self) -> Tuple[str, List[str]]:\n return 'vac', ['ding.model.template.vac']\n\n def _monitor_vars_learn(self) -> List[str]:\n return super()._monitor_vars_learn() + ['policy_loss', 'value_loss', 'entropy_loss', 'adv_abs_max', 'grad_norm']\n", "import pytest\nimport time\nfrom itertools import product\nimport numpy as np\nimport torch\nfrom ding.rl_utils import ppg_data, ppg_joint_error\n\nuse_value_clip_args = [True, False]\nrandom_weight = torch.rand(4) + 1\nweight_args = [None, random_weight]\nargs = [item for item in product(*[use_value_clip_args, weight_args])]\n\n\n# due to numeric stability of this unittest, we rerun it when sporadic error occurs\[email protected]('use_value_clip, weight', args)\ndef test_ppg(use_value_clip, weight):\n error_count = 0\n while True:\n torch.manual_seed(time.time())\n B, N = 4, 32\n logit_new = torch.randn(B, N).add_(0.1).clamp_(0.1, 0.99)\n logit_old = logit_new.add_(torch.rand_like(logit_new) * 0.1).clamp_(0.1, 0.99)\n logit_new.requires_grad_(True)\n logit_old.requires_grad_(True)\n action = torch.randint(0, N, size=(B, ))\n value_new = torch.randn(B).requires_grad_(True)\n value_old = value_new + torch.rand_like(value_new) * 0.1\n return_ = torch.randn(B) * 2\n data = ppg_data(logit_new, logit_old, action, value_new, value_old, return_, weight)\n loss = ppg_joint_error(data, use_value_clip=use_value_clip)\n assert all([l.shape == tuple() for l in loss])\n assert logit_new.grad is None\n assert value_new.grad is None\n total_loss = sum(loss)\n try:\n total_loss.backward()\n except RuntimeError as e:\n print(\"[ERROR]: {}\".format(e))\n if error_count == 10:\n break\n error_count += 1\n continue\n assert isinstance(logit_new.grad, torch.Tensor)\n assert isinstance(value_new.grad, torch.Tensor)\n break\n", "from typing import List, Dict, Any, Tuple, Union\nfrom collections import namedtuple\nimport copy\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Normal, Independent\n\nfrom ding.torch_utils import Adam, to_device\nfrom ding.rl_utils import v_1step_td_data, v_1step_td_error, get_train_sample, \\\n qrdqn_nstep_td_data, qrdqn_nstep_td_error, get_nstep_return_data\nfrom ding.model import model_wrap\nfrom ding.utils import POLICY_REGISTRY\nfrom ding.utils.data import default_collate, default_decollate\nfrom .sac import SACPolicy\nfrom .dqn import DQNPolicy\nfrom .common_utils import 
default_preprocess_learn\n\n\n@POLICY_REGISTRY.register('cql')\nclass CQLPolicy(SACPolicy):\n r\"\"\"\n Overview:\n Policy class of CQL algorithm.\n\n Config:\n == ==================== ======== ============= ================================= =======================\n ID Symbol Type Default Value Description Other(Shape)\n == ==================== ======== ============= ================================= =======================\n 1 ``type`` str td3 | RL policy register name, refer | this arg is optional,\n | to registry ``POLICY_REGISTRY`` | a placeholder\n 2 ``cuda`` bool True | Whether to use cuda for network |\n 3 | ``random_`` int 10000 | Number of randomly collected | Default to 10000 for\n | ``collect_size`` | training samples in replay | SAC, 25000 for DDPG/\n | | buffer when training starts. | TD3.\n 4 | ``model.policy_`` int 256 | Linear layer size for policy |\n | ``embedding_size`` | network. |\n 5 | ``model.soft_q_`` int 256 | Linear layer size for soft q |\n | ``embedding_size`` | network. |\n 6 | ``model.value_`` int 256 | Linear layer size for value | Defalut to None when\n | ``embedding_size`` | network. | model.value_network\n | | | is False.\n 7 | ``learn.learning`` float 3e-4 | Learning rate for soft q | Defalut to 1e-3, when\n | ``_rate_q`` | network. | model.value_network\n | | | is True.\n 8 | ``learn.learning`` float 3e-4 | Learning rate for policy | Defalut to 1e-3, when\n | ``_rate_policy`` | network. | model.value_network\n | | | is True.\n 9 | ``learn.learning`` float 3e-4 | Learning rate for policy | Defalut to None when\n | ``_rate_value`` | network. | model.value_network\n | | | is False.\n 10 | ``learn.alpha`` float 0.2 | Entropy regularization | alpha is initiali-\n | | coefficient. | zation for auto\n | | | `\\alpha`, when\n | | | auto_alpha is True\n 11 | ``learn.repara_`` bool True | Determine whether to use |\n | ``meterization`` | reparameterization trick. |\n 12 | ``learn.`` bool False | Determine whether to use | Temperature parameter\n | ``auto_alpha`` | auto temperature parameter | determines the\n | | `\\alpha`. | relative importance\n | | | of the entropy term\n | | | against the reward.\n 13 | ``learn.-`` bool False | Determine whether to ignore | Use ignore_done only\n | ``ignore_done`` | done flag. | in halfcheetah env.\n 14 | ``learn.-`` float 0.005 | Used for soft update of the | aka. Interpolation\n | ``target_theta`` | target network. | factor in polyak aver\n | | | aging for target\n | | | networks.\n == ==================== ======== ============= ================================= =======================\n \"\"\"\n\n config = dict(\n # (str) RL policy register name (refer to function \"POLICY_REGISTRY\").\n type='sac',\n # (bool) Whether to use cuda for network.\n cuda=False,\n # (bool type) on_policy: Determine whether on-policy or off-policy.\n # on-policy setting influences the behaviour of buffer.\n # Default False in SAC.\n on_policy=False,\n # (bool type) priority: Determine whether to use priority in buffer sample.\n # Default False in SAC.\n priority=False,\n # (bool) Whether use Importance Sampling Weight to correct biased update. 
If True, priority must be True.\n priority_IS_weight=False,\n # (int) Number of training samples(randomly collected) in replay buffer when training starts.\n # Default 10000 in SAC.\n random_collect_size=10000,\n model=dict(\n # (bool type) twin_critic: Determine whether to use double-soft-q-net for target q computation.\n # Please refer to TD3 about Clipped Double-Q Learning trick, which learns two Q-functions instead of one .\n # Default to True.\n twin_critic=True,\n\n # (bool type) value_network: Determine whether to use value network as the\n # original SAC paper (arXiv 1801.01290).\n # using value_network needs to set learning_rate_value, learning_rate_q,\n # and learning_rate_policy in `cfg.policy.learn`.\n # Default to False.\n # value_network=False,\n actor_head_type='reparameterization',\n ),\n learn=dict(\n # (bool) Whether to use multi gpu\n multi_gpu=False,\n # How many updates(iterations) to train after collector's one collection.\n # Bigger \"update_per_collect\" means bigger off-policy.\n # collect data -> update policy-> collect data -> ...\n update_per_collect=1,\n # (int) Minibatch size for gradient descent.\n batch_size=256,\n\n # (float type) learning_rate_q: Learning rate for soft q network.\n # Default to 3e-4.\n # Please set to 1e-3, when model.value_network is True.\n learning_rate_q=3e-4,\n # (float type) learning_rate_policy: Learning rate for policy network.\n # Default to 3e-4.\n # Please set to 1e-3, when model.value_network is True.\n learning_rate_policy=3e-4,\n # (float type) learning_rate_value: Learning rate for value network.\n # `learning_rate_value` should be initialized, when model.value_network is True.\n # Please set to 3e-4, when model.value_network is True.\n learning_rate_value=3e-4,\n\n # (float type) learning_rate_alpha: Learning rate for auto temperature parameter `\\alpha`.\n # Default to 3e-4.\n learning_rate_alpha=3e-4,\n # (float type) target_theta: Used for soft update of the target network,\n # aka. Interpolation factor in polyak averaging for target networks.\n # Default to 0.005.\n target_theta=0.005,\n # (float) discount factor for the discounted sum of rewards, aka. gamma.\n discount_factor=0.99,\n\n # (float type) alpha: Entropy regularization coefficient.\n # Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.\n # If auto_alpha is set to `True`, alpha is initialization for auto `\\alpha`.\n # Default to 0.2.\n alpha=0.2,\n\n # (bool type) auto_alpha: Determine whether to use auto temperature parameter `\\alpha` .\n # Temperature parameter determines the relative importance of the entropy term against the reward.\n # Please check out the original SAC paper (arXiv 1801.01290): Eq 1 for more details.\n # Default to False.\n # Note that: Using auto alpha needs to set learning_rate_alpha in `cfg.policy.learn`.\n auto_alpha=True,\n # (bool type) log_space: Determine whether to use auto `\\alpha` in log space.\n log_space=True,\n # (bool) Whether ignore done(usually for max step termination env. e.g. 
pendulum)\n # Note: Gym wraps the MuJoCo envs by default with TimeLimit environment wrappers.\n # These limit HalfCheetah, and several other MuJoCo envs, to max length of 1000.\n # However, interaction with HalfCheetah always gets done with done is False,\n # Since we inplace done==True with done==False to keep\n # TD-error accurate computation(``gamma * (1 - done) * next_v + reward``),\n # when the episode step is greater than max episode step.\n ignore_done=False,\n # (float) Weight uniform initialization range in the last output layer\n init_w=3e-3,\n # (int) The numbers of action sample each at every state s from a uniform-at-random\n num_actions=10,\n # (bool) Whether use lagrange multiplier in q value loss.\n with_lagrange=False,\n # (float) The threshold for difference in Q-values\n lagrange_thresh=-1,\n # (float) Loss weight for conservative item.\n min_q_weight=1.0,\n # (bool) Whether to use entory in target q.\n with_q_entropy=False,\n ),\n collect=dict(\n # You can use either \"n_sample\" or \"n_episode\" in actor.collect.\n # Get \"n_sample\" samples per collect.\n # Default n_sample to 1.\n n_sample=1,\n # (int) Cut trajectories into pieces with length \"unroll_len\".\n unroll_len=1,\n ),\n eval=dict(),\n other=dict(\n replay_buffer=dict(\n # (int type) replay_buffer_size: Max size of replay buffer.\n replay_buffer_size=1000000,\n # (int type) max_use: Max use times of one data in the buffer.\n # Data will be removed once used for too many times.\n # Default to infinite.\n # max_use=256,\n ),\n ),\n )\n r\"\"\"\n Overview:\n Policy class of SAC algorithm.\n \"\"\"\n\n def _init_learn(self) -> None:\n r\"\"\"\n Overview:\n Learn mode init method. Called by ``self.__init__``.\n Init q, value and policy's optimizers, algorithm config, main and target models.\n \"\"\"\n # Init\n self._priority = self._cfg.priority\n self._priority_IS_weight = self._cfg.priority_IS_weight\n self._value_network = False\n self._twin_critic = self._cfg.model.twin_critic\n self._num_actions = self._cfg.learn.num_actions\n\n self._min_q_version = 3\n self._min_q_weight = self._cfg.learn.min_q_weight\n self._with_lagrange = self._cfg.learn.with_lagrange and (self._lagrange_thresh > 0)\n self._lagrange_thresh = self._cfg.learn.lagrange_thresh\n if self._with_lagrange:\n self.target_action_gap = self._lagrange_thresh\n self.log_alpha_prime = torch.tensor(0.).to(self._device).requires_grad_()\n self.alpha_prime_optimizer = Adam(\n [self.log_alpha_prime],\n lr=self._cfg.learn.learning_rate_q,\n )\n\n self._with_q_entropy = self._cfg.learn.with_q_entropy\n\n # Weight Init\n init_w = self._cfg.learn.init_w\n self._model.actor[2].mu.weight.data.uniform_(-init_w, init_w)\n self._model.actor[2].mu.bias.data.uniform_(-init_w, init_w)\n self._model.actor[2].log_sigma_layer.weight.data.uniform_(-init_w, init_w)\n self._model.actor[2].log_sigma_layer.bias.data.uniform_(-init_w, init_w)\n if self._twin_critic:\n self._model.critic[0][2].last.weight.data.uniform_(-init_w, init_w)\n self._model.critic[0][2].last.bias.data.uniform_(-init_w, init_w)\n self._model.critic[1][2].last.weight.data.uniform_(-init_w, init_w)\n self._model.critic[1][2].last.bias.data.uniform_(-init_w, init_w)\n else:\n self._model.critic[2].last.weight.data.uniform_(-init_w, init_w)\n self._model.critic[2].last.bias.data.uniform_(-init_w, init_w)\n\n # Optimizers\n if self._value_network:\n self._optimizer_value = Adam(\n self._model.value_critic.parameters(),\n lr=self._cfg.learn.learning_rate_value,\n )\n self._optimizer_q = Adam(\n 
self._model.critic.parameters(),\n lr=self._cfg.learn.learning_rate_q,\n )\n self._optimizer_policy = Adam(\n self._model.actor.parameters(),\n lr=self._cfg.learn.learning_rate_policy,\n )\n\n # Algorithm config\n self._gamma = self._cfg.learn.discount_factor\n # Init auto alpha\n if self._cfg.learn.auto_alpha:\n self._target_entropy = self._cfg.learn.get('target_entropy', -np.prod(self._cfg.model.action_shape))\n if self._cfg.learn.log_space:\n self._log_alpha = torch.log(torch.FloatTensor([self._cfg.learn.alpha]))\n self._log_alpha = self._log_alpha.to(self._device).requires_grad_()\n self._alpha_optim = torch.optim.Adam([self._log_alpha], lr=self._cfg.learn.learning_rate_alpha)\n assert self._log_alpha.shape == torch.Size([1]) and self._log_alpha.requires_grad\n self._alpha = self._log_alpha.detach().exp()\n self._auto_alpha = True\n self._log_space = True\n else:\n self._alpha = torch.FloatTensor([self._cfg.learn.alpha]).to(self._device).requires_grad_()\n self._alpha_optim = torch.optim.Adam([self._alpha], lr=self._cfg.learn.learning_rate_alpha)\n self._auto_alpha = True\n self._log_space = False\n else:\n self._alpha = torch.tensor(\n [self._cfg.learn.alpha], requires_grad=False, device=self._device, dtype=torch.float32\n )\n self._auto_alpha = False\n\n # Main and target models\n self._target_model = copy.deepcopy(self._model)\n self._target_model = model_wrap(\n self._target_model,\n wrapper_name='target',\n update_type='momentum',\n update_kwargs={'theta': self._cfg.learn.target_theta}\n )\n self._learn_model = model_wrap(self._model, wrapper_name='base')\n self._learn_model.reset()\n self._target_model.reset()\n\n self._forward_learn_cnt = 0\n\n def _forward_learn(self, data: dict) -> Dict[str, Any]:\n r\"\"\"\n Overview:\n Forward and backward function of learn mode.\n Arguments:\n - data (:obj:`dict`): Dict type data, including at least ['obs', 'action', 'reward', 'next_obs']\n Returns:\n - info_dict (:obj:`Dict[str, Any]`): Including current lr and loss.\n \"\"\"\n loss_dict = {}\n data = default_preprocess_learn(\n data,\n use_priority=self._priority,\n use_priority_IS_weight=self._cfg.priority_IS_weight,\n ignore_done=self._cfg.learn.ignore_done,\n use_nstep=False\n )\n if len(data.get('action').shape) == 1:\n data['action'] = data['action'].reshape(-1, 1)\n\n if self._cuda:\n data = to_device(data, self._device)\n\n self._learn_model.train()\n self._target_model.train()\n obs = data['obs']\n next_obs = data['next_obs']\n reward = data['reward']\n done = data['done']\n\n # 1. predict q value\n q_value = self._learn_model.forward(data, mode='compute_critic')['q_value']\n\n # 2. 
predict target value\n if self._value_network:\n # predict v value\n v_value = self._learn_model.forward(obs, mode='compute_value_critic')['v_value']\n with torch.no_grad():\n next_v_value = self._target_model.forward(next_obs, mode='compute_value_critic')['v_value']\n target_q_value = next_v_value\n else:\n # target q value.\n with torch.no_grad():\n (mu, sigma) = self._learn_model.forward(next_obs, mode='compute_actor')['logit']\n\n dist = Independent(Normal(mu, sigma), 1)\n pred = dist.rsample()\n next_action = torch.tanh(pred)\n y = 1 - next_action.pow(2) + 1e-6\n next_log_prob = dist.log_prob(pred).unsqueeze(-1)\n next_log_prob = next_log_prob - torch.log(y).sum(-1, keepdim=True)\n\n next_data = {'obs': next_obs, 'action': next_action}\n target_q_value = self._target_model.forward(next_data, mode='compute_critic')['q_value']\n # the value of a policy according to the maximum entropy objective\n if self._twin_critic:\n # find min one as target q value\n if self._with_q_entropy:\n target_q_value = torch.min(target_q_value[0],\n target_q_value[1]) - self._alpha * next_log_prob.squeeze(-1)\n else:\n target_q_value = torch.min(target_q_value[0], target_q_value[1])\n else:\n if self._with_q_entropy:\n target_q_value = target_q_value - self._alpha * next_log_prob.squeeze(-1)\n\n # 3. compute q loss\n if self._twin_critic:\n q_data0 = v_1step_td_data(q_value[0], target_q_value, reward, done, data['weight'])\n loss_dict['critic_loss'], td_error_per_sample0 = v_1step_td_error(q_data0, self._gamma)\n q_data1 = v_1step_td_data(q_value[1], target_q_value, reward, done, data['weight'])\n loss_dict['twin_critic_loss'], td_error_per_sample1 = v_1step_td_error(q_data1, self._gamma)\n td_error_per_sample = (td_error_per_sample0 + td_error_per_sample1) / 2\n else:\n q_data = v_1step_td_data(q_value, target_q_value, reward, done, data['weight'])\n loss_dict['critic_loss'], td_error_per_sample = v_1step_td_error(q_data, self._gamma)\n\n # 4. 
add CQL\n\n curr_actions_tensor, curr_log_pis = self._get_policy_actions(data, self._num_actions)\n new_curr_actions_tensor, new_log_pis = self._get_policy_actions({'obs': next_obs}, self._num_actions)\n\n random_actions_tensor = torch.FloatTensor(curr_actions_tensor.shape).uniform_(-1,\n 1).to(curr_actions_tensor.device)\n\n obs_repeat = obs.unsqueeze(1).repeat(1, self._num_actions,\n 1).view(obs.shape[0] * self._num_actions, obs.shape[1])\n act_repeat = data['action'].unsqueeze(1).repeat(1, self._num_actions, 1).view(\n data['action'].shape[0] * self._num_actions, data['action'].shape[1]\n )\n\n q_rand = self._get_q_value({'obs': obs_repeat, 'action': random_actions_tensor})\n # q2_rand = self._get_q_value(obs, random_actions_tensor, network=self.qf2)\n q_curr_actions = self._get_q_value({'obs': obs_repeat, 'action': curr_actions_tensor})\n # q2_curr_actions = self._get_tensor_values(obs, curr_actions_tensor, network=self.qf2)\n q_next_actions = self._get_q_value({'obs': obs_repeat, 'action': new_curr_actions_tensor})\n # q2_next_actions = self._get_tensor_values(obs, new_curr_actions_tensor, network=self.qf2)\n\n cat_q1 = torch.cat([q_rand[0], q_value[0].reshape(-1, 1, 1), q_next_actions[0], q_curr_actions[0]], 1)\n cat_q2 = torch.cat([q_rand[1], q_value[1].reshape(-1, 1, 1), q_next_actions[1], q_curr_actions[1]], 1)\n std_q1 = torch.std(cat_q1, dim=1)\n std_q2 = torch.std(cat_q2, dim=1)\n if self._min_q_version == 3:\n # importance sammpled version\n random_density = np.log(0.5 ** curr_actions_tensor.shape[-1])\n cat_q1 = torch.cat(\n [\n q_rand[0] - random_density, q_next_actions[0] - new_log_pis.detach(),\n q_curr_actions[0] - curr_log_pis.detach()\n ], 1\n )\n cat_q2 = torch.cat(\n [\n q_rand[1] - random_density, q_next_actions[1] - new_log_pis.detach(),\n q_curr_actions[1] - curr_log_pis.detach()\n ], 1\n )\n\n min_qf1_loss = torch.logsumexp(cat_q1, dim=1).mean() * self._min_q_weight\n min_qf2_loss = torch.logsumexp(cat_q2, dim=1).mean() * self._min_q_weight\n \"\"\"Subtract the log likelihood of data\"\"\"\n min_qf1_loss = min_qf1_loss - q_value[0].mean() * self._min_q_weight\n min_qf2_loss = min_qf2_loss - q_value[1].mean() * self._min_q_weight\n\n if self._with_lagrange:\n alpha_prime = torch.clamp(self.log_alpha_prime.exp(), min=0.0, max=1000000.0)\n min_qf1_loss = alpha_prime * (min_qf1_loss - self.target_action_gap)\n min_qf2_loss = alpha_prime * (min_qf2_loss - self.target_action_gap)\n\n self.alpha_prime_optimizer.zero_grad()\n alpha_prime_loss = (-min_qf1_loss - min_qf2_loss) * 0.5\n alpha_prime_loss.backward(retain_graph=True)\n self.alpha_prime_optimizer.step()\n\n loss_dict['critic_loss'] += min_qf1_loss\n if self._twin_critic:\n loss_dict['twin_critic_loss'] += min_qf2_loss\n\n # 5. update q network\n self._optimizer_q.zero_grad()\n loss_dict['critic_loss'].backward(retain_graph=True)\n if self._twin_critic:\n loss_dict['twin_critic_loss'].backward()\n self._optimizer_q.step()\n\n # 6. evaluate to get action distribution\n (mu, sigma) = self._learn_model.forward(data['obs'], mode='compute_actor')['logit']\n dist = Independent(Normal(mu, sigma), 1)\n pred = dist.rsample()\n action = torch.tanh(pred)\n y = 1 - action.pow(2) + 1e-6\n log_prob = dist.log_prob(pred).unsqueeze(-1)\n log_prob = log_prob - torch.log(y).sum(-1, keepdim=True)\n\n eval_data = {'obs': obs, 'action': action}\n new_q_value = self._learn_model.forward(eval_data, mode='compute_critic')['q_value']\n if self._twin_critic:\n new_q_value = torch.min(new_q_value[0], new_q_value[1])\n\n # 7. 
(optional)compute value loss\n if self._value_network:\n # new_q_value: (bs, ), log_prob: (bs, act_shape) -> target_v_value: (bs, )\n if self._with_q_entropy:\n target_v_value = (new_q_value.unsqueeze(-1) - self._alpha * log_prob).mean(dim=-1)\n else:\n target_v_value = new_q_value.unsqueeze(-1).mean(dim=-1)\n loss_dict['value_loss'] = F.mse_loss(v_value, target_v_value.detach())\n\n # update value network\n self._optimizer_value.zero_grad()\n loss_dict['value_loss'].backward()\n self._optimizer_value.step()\n\n # 8. compute policy loss\n policy_loss = (self._alpha * log_prob - new_q_value.unsqueeze(-1)).mean()\n\n loss_dict['policy_loss'] = policy_loss\n\n # 9. update policy network\n self._optimizer_policy.zero_grad()\n loss_dict['policy_loss'].backward()\n self._optimizer_policy.step()\n\n # 10. compute alpha loss\n if self._auto_alpha:\n if self._log_space:\n log_prob = log_prob + self._target_entropy\n loss_dict['alpha_loss'] = -(self._log_alpha * log_prob.detach()).mean()\n\n self._alpha_optim.zero_grad()\n loss_dict['alpha_loss'].backward()\n self._alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n else:\n log_prob = log_prob + self._target_entropy\n loss_dict['alpha_loss'] = -(self._alpha * log_prob.detach()).mean()\n\n self._alpha_optim.zero_grad()\n loss_dict['alpha_loss'].backward()\n self._alpha_optim.step()\n self._alpha = max(0, self._alpha)\n\n loss_dict['total_loss'] = sum(loss_dict.values())\n\n # =============\n # after update\n # =============\n self._forward_learn_cnt += 1\n # target update\n self._target_model.update(self._learn_model.state_dict())\n return {\n 'cur_lr_q': self._optimizer_q.defaults['lr'],\n 'cur_lr_p': self._optimizer_policy.defaults['lr'],\n 'priority': td_error_per_sample.abs().tolist(),\n 'td_error': td_error_per_sample.detach().mean().item(),\n 'alpha': self._alpha.item(),\n 'target_q_value': target_q_value.detach().mean().item(),\n **loss_dict\n }\n\n def _get_policy_actions(self, data: Dict, num_actions=10, epsilon: float = 1e-6) -> List:\n\n # evaluate to get action distribution\n obs = data['obs']\n obs = obs.unsqueeze(1).repeat(1, num_actions, 1).view(obs.shape[0] * num_actions, obs.shape[1])\n (mu, sigma) = self._learn_model.forward(obs, mode='compute_actor')['logit']\n dist = Independent(Normal(mu, sigma), 1)\n pred = dist.rsample()\n action = torch.tanh(pred)\n\n # evaluate action log prob depending on Jacobi determinant.\n y = 1 - action.pow(2) + epsilon\n log_prob = dist.log_prob(pred).unsqueeze(-1)\n log_prob = log_prob - torch.log(y).sum(-1, keepdim=True)\n\n return action, log_prob.view(-1, num_actions, 1)\n\n def _get_q_value(self, data: Dict, keep=True) -> torch.Tensor:\n new_q_value = self._learn_model.forward(data, mode='compute_critic')['q_value']\n if self._twin_critic:\n new_q_value = [value.view(-1, self._num_actions, 1) for value in new_q_value]\n else:\n new_q_value = new_q_value.view(-1, self._num_actions, 1)\n if self._twin_critic and not keep:\n new_q_value = torch.min(new_q_value[0], new_q_value[1])\n return new_q_value\n\n\n@POLICY_REGISTRY.register('cql_discrete')\nclass CQLDiscretePolicy(DQNPolicy):\n r\"\"\"\n Overview:\n Policy class of CQL algorithm in discrete environments.\n\n Config:\n == ==================== ======== ============== ======================================== =======================\n ID Symbol Type Default Value Description Other(Shape)\n == ==================== ======== ============== ======================================== =======================\n 1 ``type`` str qrdqn | RL 
policy register name, refer to | this arg is optional,\n | registry ``POLICY_REGISTRY`` | a placeholder\n 2 ``cuda`` bool False | Whether to use cuda for network | this arg can be diff-\n | erent from modes\n 3 ``on_policy`` bool False | Whether the RL algorithm is on-policy\n | or off-policy\n 4 ``priority`` bool True | Whether use priority(PER) | priority sample,\n | update priority\n 6 | ``other.eps`` float 0.05 | Start value for epsilon decay. It's\n | ``.start`` | small because rainbow use noisy net.\n 7 | ``other.eps`` float 0.05 | End value for epsilon decay.\n | ``.end``\n 8 | ``discount_`` float 0.97, | Reward's future discount factor, aka. | may be 1 when sparse\n | ``factor`` [0.95, 0.999] | gamma | reward env\n 9 ``nstep`` int 3, | N-step reward discount sum for target\n [3, 5] | q_value estimation\n 10 | ``learn.update`` int 3 | How many updates(iterations) to train | this args can be vary\n | ``per_collect`` | after collector's one collection. Only | from envs. Bigger val\n | valid in serial training | means more off-policy\n 11 ``learn.kappa`` float / | Threshold of Huber loss\n == ==================== ======== ============== ======================================== =======================\n \"\"\"\n\n config = dict(\n # (str) RL policy register name (refer to function \"POLICY_REGISTRY\").\n type='cql_discrete',\n # (bool) Whether to use cuda for network.\n cuda=False,\n # (bool) Whether the RL algorithm is on-policy or off-policy.\n on_policy=False,\n # (bool) Whether use priority(priority sample, IS weight, update priority)\n priority=False,\n # (float) Reward's future discount factor, aka. gamma.\n discount_factor=0.97,\n # (int) N-step reward for target q_value estimation\n nstep=1,\n learn=dict(\n # (bool) Whether to use multi gpu\n multi_gpu=False,\n # How many updates(iterations) to train after collector's one collection.\n # Bigger \"update_per_collect\" means bigger off-policy.\n # collect data -> update policy-> collect data -> ...\n update_per_collect=1,\n batch_size=64,\n learning_rate=0.001,\n # ==============================================================\n # The following configs are algorithm-specific\n # ==============================================================\n # (int) Frequence of target network update.\n target_update_freq=100,\n # (bool) Whether ignore done(usually for max step termination env)\n ignore_done=False,\n # (float) Loss weight for conservative item.\n min_q_weight=1.0,\n ),\n # collect_mode config\n collect=dict(\n # (int) Only one of [n_sample, n_step, n_episode] shoule be set\n # n_sample=8,\n # (int) Cut trajectories into pieces with length \"unroll_len\".\n unroll_len=1,\n ),\n eval=dict(),\n # other config\n other=dict(\n # Epsilon greedy with decay.\n eps=dict(\n # (str) Decay type. Support ['exp', 'linear'].\n type='exp',\n start=0.95,\n end=0.1,\n # (int) Decay length(env step)\n decay=10000,\n ),\n replay_buffer=dict(replay_buffer_size=10000, )\n ),\n )\n\n def _init_learn(self) -> None:\n r\"\"\"\n Overview:\n Learn mode init method. 
Called by ``self.__init__``.\n Init the optimizer, algorithm config, main and target models.\n \"\"\"\n self._min_q_weight = self._cfg.learn.min_q_weight\n self._priority = self._cfg.priority\n # Optimizer\n self._optimizer = Adam(self._model.parameters(), lr=self._cfg.learn.learning_rate)\n\n self._gamma = self._cfg.discount_factor\n self._nstep = self._cfg.nstep\n\n # use wrapper instead of plugin\n self._target_model = copy.deepcopy(self._model)\n self._target_model = model_wrap(\n self._target_model,\n wrapper_name='target',\n update_type='assign',\n update_kwargs={'freq': self._cfg.learn.target_update_freq}\n )\n self._learn_model = model_wrap(self._model, wrapper_name='argmax_sample')\n self._learn_model.reset()\n self._target_model.reset()\n\n def _forward_learn(self, data: dict) -> Dict[str, Any]:\n r\"\"\"\n Overview:\n Forward and backward function of learn mode.\n Arguments:\n - data (:obj:`dict`): Dict type data, including at least ['obs', 'action', 'reward', 'next_obs']\n Returns:\n - info_dict (:obj:`Dict[str, Any]`): Including current lr and loss.\n \"\"\"\n data = default_preprocess_learn(\n data, use_priority=self._priority, ignore_done=self._cfg.learn.ignore_done, use_nstep=True\n )\n if self._cuda:\n data = to_device(data, self._device)\n # ====================\n # Q-learning forward\n # ====================\n self._learn_model.train()\n self._target_model.train()\n # Current q value (main model)\n ret = self._learn_model.forward(data['obs'])\n q_value, tau = ret['q'], ret['tau']\n # Target q value\n with torch.no_grad():\n target_q_value = self._target_model.forward(data['next_obs'])['q']\n # Max q value action (main model)\n target_q_action = self._learn_model.forward(data['next_obs'])['action']\n\n # add CQL\n # 1. chose action and compute q in dataset.\n # 2. 
compute value loss(negative_sampling - dataset_expec)\n replay_action_one_hot = F.one_hot(data['action'], self._cfg.model.action_shape)\n replay_chosen_q = (q_value.mean(-1) * replay_action_one_hot).sum(dim=1)\n\n dataset_expec = replay_chosen_q.mean()\n\n negative_sampling = torch.logsumexp(q_value.mean(-1), dim=1).mean()\n\n min_q_loss = negative_sampling - dataset_expec\n\n data_n = qrdqn_nstep_td_data(\n q_value, target_q_value, data['action'], target_q_action, data['reward'], data['done'], tau, data['weight']\n )\n value_gamma = data.get('value_gamma')\n loss, td_error_per_sample = qrdqn_nstep_td_error(\n data_n, self._gamma, nstep=self._nstep, value_gamma=value_gamma\n )\n\n loss += self._min_q_weight * min_q_loss\n\n # ====================\n # Q-learning update\n # ====================\n self._optimizer.zero_grad()\n loss.backward()\n if self._cfg.learn.multi_gpu:\n self.sync_gradients(self._learn_model)\n self._optimizer.step()\n\n # =============\n # after update\n # =============\n self._target_model.update(self._learn_model.state_dict())\n return {\n 'cur_lr': self._optimizer.defaults['lr'],\n 'total_loss': loss.item(),\n 'priority': td_error_per_sample.abs().tolist(),\n 'q_target': target_q_value.mean().item(),\n 'q_value': q_value.mean().item(),\n # Only discrete action satisfying len(data['action'])==1 can return this and draw histogram on tensorboard.\n # '[histogram]action_distribution': data['action'],\n }\n\n def _state_dict_learn(self) -> Dict[str, Any]:\n return {\n 'model': self._learn_model.state_dict(),\n 'target_model': self._target_model.state_dict(),\n 'optimizer': self._optimizer.state_dict(),\n }\n\n def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:\n self._learn_model.load_state_dict(state_dict['model'])\n self._target_model.load_state_dict(state_dict['target_model'])\n self._optimizer.load_state_dict(state_dict['optimizer'])\n\n def _init_collect(self) -> None:\n r\"\"\"\n Overview:\n Collect mode init method. 
Called by ``self.__init__``.\n Init traj and unroll length, collect model.\n Enable the eps_greedy_sample\n \"\"\"\n self._unroll_len = self._cfg.collect.unroll_len\n self._gamma = self._cfg.discount_factor # necessary for parallel\n self._nstep = self._cfg.nstep # necessary for parallel\n self._collect_model = model_wrap(self._model, wrapper_name='eps_greedy_sample')\n self._collect_model.reset()\n\n def _forward_collect(self, data: Dict[int, Any], eps: float) -> Dict[int, Any]:\n \"\"\"\n Overview:\n Forward computation graph of collect mode(collect training data), with eps_greedy for exploration.\n Arguments:\n - data (:obj:`Dict[str, Any]`): Dict type data, stacked env data for predicting policy_output(action), \\\n values are torch.Tensor or np.ndarray or dict/list combinations, keys are env_id indicated by integer.\n - eps (:obj:`float`): epsilon value for exploration, which is decayed by collected env step.\n Returns:\n - output (:obj:`Dict[int, Any]`): The dict of predicting policy_output(action) for the interaction with \\\n env and the constructing of transition.\n ArgumentsKeys:\n - necessary: ``obs``\n ReturnsKeys\n - necessary: ``logit``, ``action``\n \"\"\"\n data_id = list(data.keys())\n data = default_collate(list(data.values()))\n if self._cuda:\n data = to_device(data, self._device)\n self._collect_model.eval()\n with torch.no_grad():\n output = self._collect_model.forward(data, eps=eps)\n if self._cuda:\n output = to_device(output, 'cpu')\n output = default_decollate(output)\n return {i: d for i, d in zip(data_id, output)}\n\n def _get_train_sample(self, data: list) -> Union[None, List[Any]]:\n r\"\"\"\n Overview:\n Get the trajectory and the n step return data, then sample from the n_step return data\n Arguments:\n - data (:obj:`list`): The trajectory's cache\n Returns:\n - samples (:obj:`dict`): The training samples generated\n \"\"\"\n data = get_nstep_return_data(data, self._nstep, gamma=self._gamma)\n return get_train_sample(data, self._unroll_len)\n\n def default_model(self) -> Tuple[str, List[str]]:\n return 'qrdqn', ['ding.model.template.q_learning']\n\n def _monitor_vars_learn(self) -> List[str]:\n return ['cur_lr', 'total_loss', 'q_target', 'q_value']\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.cuda.synchronize", "torch.cat", "torch.randn", "torch.rand", "torch.cuda.is_available", "torch.flatten" ], [ "torch.nn.ReLU" ], [ "numpy.random.seed", "numpy.random.choice", "numpy.stack", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "torch.no_grad" ], [ "torch.randn", "torch.randint", "torch.rand", "torch.rand_like" ], [ "torch.optim.Adam", "numpy.log", "torch.Size", "torch.min", "torch.tensor", "torch.tanh", "torch.std", "torch.no_grad", "torch.FloatTensor", "numpy.prod", "torch.distributions.Normal", "torch.nn.functional.one_hot", "torch.log", "torch.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shivakoreddi/CryptoDataApplication
[ "ad620231a0614ed6f4f587dfcfb83249d1d16689" ]
[ "connectingPipelines/coins_ld.py" ]
[ "from apiWrapper import coinAPI\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table,Column,Integer,String,MetaData,ForeignKey\nimport sqlite3\nfrom sqlite3 import Error\nimport pandas as pd\nimport os\n\n\ndef main():\n path = \"/CryptoDataApplication/\"\n for filename in os.listdir(path):\n if filename.startswith('valid_coin'):\n file = filename\n\n coin_df = pd.read_csv(file,sep=',')\n conn = sqlite3.connect('/CryptoDataApplication/transactionDB/tradingSchema.db')\n cursor = conn.cursor()\n query = []\n ##for index,row in coin_df.iterrows():\n ##query = \"\"\"INSERT OR REPLACE INTO coins(id,symbol,name,image) VALUES('{0}','{1}','{2}','{3}')\"\"\".format(row['id'],row['symbol'],row['name'],row['image'])\n #print(query[1])\n ##cursor.execute(query)\n ##conn.commit()\n\n cursor.execute(\"select * from coins\")\n rows = cursor.fetchall()\n for row in rows:\n print(row)\n\n\nif __name__==\"__main__\":\n main()\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Nikolay-Lysenko/gpn
[ "a59f43e90536f85f8b0051c5ce6d0497081a5a8f" ]
[ "tests/test_graph.py" ]
[ "\"\"\"\nTest `graph.py` module.\n\nAuthor: Nikolay Lysenko\n\"\"\"\n\n\nfrom typing import List, Tuple\n\nimport pytest\nimport tensorflow as tf\nimport numpy as np\n\nfrom gpn.graph import sample_multiple_fragments\n\n\[email protected](\n \"images, corners, fragment_size, frame_size, n_channels, expected\",\n [\n (\n # `images`\n np.array([\n [\n [[1, 0, 1, 0],\n [0, 1, 0, 1],\n [1, 0, 1, 0],\n [0, 1, 0, 1]],\n [[1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]]\n ],\n [\n [[1, 1, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 1, 1]]\n ]\n ]).swapaxes(1, 3),\n # `corners`\n [(1, 1), (0, 2)],\n # `fragment_size`\n 4,\n # `frame_size`\n 1,\n # `n_channels`\n 3,\n # `expected`\n np.array([\n [\n [[1, 0, 1, 0],\n [0, 1, 0, 1],\n [1, 0, 1, 0],\n [0, 1, 0, 1]],\n [[1, 1, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]]\n ],\n [\n [[1, 1, 0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 1, 1, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 1, 1],\n [0, 0, 1, 1]]\n ],\n [\n [[0, 0, 1, 0],\n [0, 1, 0, 1],\n [0, 0, 1, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 1, 1, 1],\n [0, 0, 0, 0]]\n ],\n [\n [[0, 1, 1, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]\n ],\n ]).swapaxes(1, 3)\n )\n ]\n)\ndef test_sample_multiple_fragments(\n images: np.ndarray, corners: List[Tuple[int, int]],\n fragment_size: int, frame_size: int, n_channels: int,\n expected: np.ndarray\n) -> None:\n \"\"\"Test `sample_multiple_fragments` function.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n tensor_images = tf.placeholder(tf.float32, images.shape)\n tensor_corners = [\n tf.placeholder(tf.int32, (2,), name=f'corner_{i}')\n for i, _ in enumerate(corners)\n ]\n tensor_fragments = sample_multiple_fragments(\n tensor_images, tensor_corners,\n fragment_size, frame_size, n_channels\n )\n with tf.Session(graph=graph) as sess:\n feed_dict = {\n tensor_images: images,\n **{k: v for k, v in zip(tensor_corners, corners)}\n }\n fragments = tensor_fragments.eval(feed_dict, sess)\n np.testing.assert_array_equal(fragments, expected)\n" ]
[ [ "tensorflow.Graph", "tensorflow.placeholder", "numpy.testing.assert_array_equal", "tensorflow.Session", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
caserwin/daily-learning-python
[ "01fea4c5d4e86cbea2dbef8817146f018b5f1479" ]
[ "demo_sklearn/model/model_test.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/19 下午2:08\n# @Author : Erwin\nfrom common.pickle_helper import read_model\nimport numpy as np\n# noinspection PyUnresolvedReferences\nfrom sklearn.neighbors import LocalOutlierFactor\n# noinspection PyUnresolvedReferences\nfrom sklearn.ensemble import IsolationForest\n\nlof_model = read_model(\"./sklearn_LOF_demo1.pkl\")\nif_model = read_model(\"./sklearn_IsolationForest_demo1.pkl\")\n\nuser_define = np.array([(2, 3), (5, 6), (2.3, 1.8)])\n# -1表示异常点,1表示正常点。\nprint(lof_model.predict(user_define))\nprint(if_model.predict(user_define))" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doffing81/lambdata-AshleyBrooks213
[ "9c5d4b5f49094e1b2d43f51e7e42ece2e98e3bb6" ]
[ "lambdata/helper_functions.py" ]
[ "\"\"\"A collection of Data Science helper functions\"\"\"\n\nimport pandas as pd \nimport numpy as np \nimport random\n\ndef df_cleaner(df):\n \"\"\"Clean a df of nulls\"\"\"\n return df.dropna()\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"df_cleaner is working!\") \n\n\ndef null_count(df):\n \"\"\"Check a dataframe for nulls and return the \n number of missing values\"\"\"\n return df.isnull().sum().sum()\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"null_count is working!\")\n\n\ndef train_test_split(df, frac):\n \"\"\"\n Create a Train/Test split function for a dataframe and return both \n the Training and Testing sets.\n Frac refers to the percent of data you would like to set aside\n for training.\n \"\"\"\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"train_test_split is working!\")\n\n\ndef randomize(df, seed):\n \"\"\"\n Testing randomize(df) function: Develop a \n randomization function that randomizes all of \n a dataframes cells then returns that randomized dataframe\n \"\"\"\n \"\"\"NOTE: I am not sure about the seed part.\"\"\"\n #seed = np.random.seed(0)\n \"\"\"Randomly sample 100% of your df\"\"\"\n df = df.sample(frac=1, random_state=seed)#.reset_index(drop=True)\n return df\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"randomize is working!\")\n\n \ndef addy_split(add_series):\n cities = []\n states = []\n zipcodes = []\n for row in add_series.iterrows():\n alist = row.split()\n #if statements to find city\n city = [word for word in alist if word[-1] == ',']\n cities.append(city)\n #if statements to find state\n state = [piece for piece in alist if len(piece) == 2 and piece[:2].isupper() == True]\n states.append(state)\n # if statements to zipcode\n zipcode = [n for n in alist if len(n) == 5 and n.isdigit() == True]\n zipcodes.append(zipcode)\n df = pd.DataFrame({'city': cities, 'state': states, 'zip': zipcodes})\n return df\n\n\n\n\"\"\"Check to make sure that code works\"\"\"\nprint(\"addy_split is working!\")\n\n\ndef abbr_2_st(state_series, abbr_2_st=True):\n \"\"\"\n Return a new column with the full name from a State\n abbreviation column -> An input of FL would return Florida.\n This function should also take a boolean (abbr_2_state)\n and when False takes full state names and return state abbreviations.\n -> An input of Florida would return Fl.\n \"\"\"\n us_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'American Samoa': 'AS',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Guam': 'GU',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands':'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin 
Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY'\n}\n if abbr_2_st == True:\n inv_map = {v: k for k, v in us_state_abbrev.items()}\n full_names = []\n for abbv in state_series:\n full_names.append(inv_map[abbv])\n return full_names\n else:\n # Return Abbreviation\n abbvs = []\n for full_name in state_series:\n abbvs.append(us_state_abbrev[full_name])\n return abbvs\n\n\n\nFAVORITE_ANIMALS = ['dolphin', 'whale', 'seadragon', 'wolf', 'tiger']\nFAVORITE_COLORS = ['pink', 'blue', 'purple', 'green']\n\ndef add(x1, x2):\n return x1 + x2\n \n\ndef increment(x):\n return x + 1\n\n\"\"\"Check to make sure code works all the way through\"\"\"\nprint(\"it worked!\")" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tianjuxue/AmorFEA
[ "5ddf6c1c9d4489e74a207d5d63ca00af57911ab0" ]
[ "src/opt/optimizer_robot.py" ]
[ "import numpy as np\nimport torch\nimport scipy.optimize as opt\nimport time\nfrom .optimizer import Optimizer\nfrom ..ml.trainer_robot import TrainerRobot\nfrom ..ml.models import RobotNetwork, RobotSolver\nfrom .. import arguments\nfrom ..graph.visualization import scalar_field_paraview\n\n\nclass OptimizerRobot(Optimizer):\n\n def __init__(self, args):\n super(OptimizerRobot, self).__init__(args)\n self.tip_x1_index = 6\n self.tip_x2_index = 7\n self.trainer = TrainerRobot(args, opt=True)\n self.path = self.args.root_path + '/' + self.args.model_path + '/' + \\\n self.trainer.poisson.name + '/model_s'\n self.model = RobotNetwork(self.args, self.trainer.graph_info)\n self.model.load_state_dict(torch.load(self.path))\n\n\nclass OptimizerRobotTrajectory(OptimizerRobot):\n\n def __init__(self, args):\n super(OptimizerRobotTrajectory, self).__init__(args)\n self.target_coos = heart_shape()\n self.n_pts = self.target_coos.shape[1]\n\n def optimize(self):\n x_initial = np.zeros(self.args.input_size * self.n_pts)\n options = {'eps': 1e-15, 'maxiter': 1000,\n 'disp': True}\n res = opt.minimize(fun=self._objective,\n x0=x_initial,\n method='CG',\n jac=self._derivative,\n callback=None,\n options=options)\n x_opt = res.x.reshape(-1, self.args.input_size)\n source = torch.tensor(x_opt, dtype=torch.float)\n solution = self.model(source)\n print(\"NN surrogate, loss is\", self.trainer.loss_function(\n source, solution).data.numpy())\n for i in range(31):\n scalar_field_paraview(self.args, solution.data.numpy()[\n i], self.trainer.poisson, \"/robot/time_series_nn/u\" + str(i))\n\n for i in range(31):\n gt_sol = self.trainer.forward_prediction(x_opt[i], self.model)\n scalar_field_paraview(\n self.args, gt_sol, self.trainer.poisson, \"/robot/time_series_gt/u\" + str(i))\n\n return res.x\n\n def _obj(self, source):\n source = source.reshape(-1, self.args.input_size)\n solution = self.model(source)\n sol_tip = solution[:, [self.tip_x1_index, self.tip_x2_index]]\n tar_tip = torch.tensor(self.target_coos.transpose(), dtype=torch.float)\n L_dist = ((sol_tip - tar_tip)**2).sum()\n L_reg = ((source[1:, :] - source[:-1, :])**2).sum()\n alpha = 0 * 1e-3\n L = L_dist + alpha * L_reg\n return L\n\n\nclass OptimizerRobotPoint(OptimizerRobot):\n\n def __init__(self, args):\n super(OptimizerRobotPoint, self).__init__(args)\n self.target_point = np.array([0, -2])\n self.para_data = None\n\n def _opt(self, alpha=1e-2, x_initial=None, maxiter=200, log_interval=20):\n if x_initial is None:\n x_initial = np.zeros(self.args.input_size)\n\n x = x_initial\n start = time.time()\n wall_time = [0]\n objective = []\n source = [x]\n for i in range(maxiter):\n obj = self._objective(x)\n der = self._derivative(x)\n x = x - alpha * der\n if i % log_interval == 0:\n print(\"loop {} obj {}\".format(i, obj))\n wall_time.append(time.time() - start)\n objective.append(obj)\n source.append(x)\n x_opt = x\n objective.append(self._objective(x))\n return x_opt, np.asarray(wall_time), np.asarray(objective), np.asarray(source)\n\n def L_dist(self, solution):\n L = (solution[0][self.tip_x1_index] - self.target_point[0])**2 \\\n + (solution[0][self.tip_x2_index] - self.target_point[1])**2\n return L\n\n def evaluate(self, source):\n solution, _ = self.trainer.forward_prediction(source, model=self.model)\n L = self.L_dist(np.expand_dims(solution, axis=0))\n return L, solution\n\n def batch_evaluate(self, source):\n Ls = []\n sols = []\n for s in source:\n L, sol = self.evaluate(s)\n Ls.append(L)\n sols.append(sol)\n print(\"Evaluated L\", L)\n 
return np.asarray(Ls), np.asarray(sols)\n\n\nclass OptimizerRobotPointFree(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointFree, self).__init__(args)\n\n def optimize(self, x_initial=None):\n if x_initial is None:\n x_initial = 0.1 * np.ones(self.args.input_size)\n\n x = x_initial\n self._obj(x)\n options = {'maxiter': 100, 'disp': True,\n 'adaptive': True}\n res = opt.minimize(fun=self._obj,\n x0=x_initial,\n method='Nelder-Mead',\n options=options)\n x_opt = x\n return x_opt\n\n def _obj(self, source):\n solution, _ = self.trainer.forward_prediction(\n source, model=None, para_data=self.para_data)\n L = self.L_dist(torch.tensor(solution, dtype=torch.float).unsqueeze(0))\n print(L)\n return L.item()\n\n\nclass OptimizerRobotPointSurrogate(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointSurrogate, self).__init__(args)\n\n def optimize(self, alpha=1e-2, x_initial=None, maxiter=100, log_interval=100):\n return self._opt(alpha=alpha, x_initial=x_initial, maxiter=maxiter, log_interval=log_interval)\n\n def _obj(self, source):\n source = source.unsqueeze(0)\n solution = self.model(source)\n L = self.L_dist(solution)\n return L\n\n\nclass OptimizerRobotPointAdjoint(OptimizerRobotPoint):\n\n def __init__(self, args):\n super(OptimizerRobotPointAdjoint, self).__init__(args)\n\n def optimize(self, alpha=2 * 1e-2, x_initial=None, maxiter=20, log_interval=1):\n return self._opt(alpha=alpha, x_initial=x_initial, maxiter=maxiter, log_interval=log_interval)\n\n def _objective(self, source):\n _, self.para_data = self.trainer.forward_prediction(\n source, model=None, para_data=self.para_data)\n _, _, L = self._objective_partials(source, self.para_data)\n return L\n\n def _derivative(self, source):\n dcdx, dcdy = self._constraint_partials(source, self.para_data)\n dLdx, dLdy, _ = self._objective_partials(source, self.para_data)\n J = self._adjoint_derivative(dcdx, dcdy, dLdx, dLdy)\n return J\n\n def _adjoint_derivative(self, dcdx, dcdy, dLdx, dLdy):\n dcdx_T = dcdx.transpose()\n adjoint_sol = np.linalg.solve(dcdx_T, dLdx)\n total_derivative = -np.matmul(adjoint_sol, dcdy) + dLdy\n return total_derivative\n\n def _objective_partials(self, source, para_data):\n solver = RobotSolver(self.args, self.trainer.graph_info)\n solver.reset_parameters_data(para_data)\n\n source = torch.tensor(source, requires_grad=True, dtype=torch.float)\n source_input = source.unsqueeze(0)\n\n solution = solver(source_input)\n L = self.L_dist(solution)\n\n dLdx = torch.autograd.grad(\n L, solver.para, create_graph=True, retain_graph=True)[0]\n dLdy = torch.autograd.grad(\n L, source, create_graph=True, retain_graph=True)[0]\n\n return dLdx.data.numpy(), dLdy.data.numpy(), L.data.numpy()\n\n def _constraint_partials(self, source, para_data):\n solver = RobotSolver(self.args, self.trainer.graph_info)\n solver.reset_parameters_data(para_data)\n\n source = torch.tensor(source, requires_grad=True, dtype=torch.float)\n source_input = source.unsqueeze(0)\n\n solution = solver(source_input)\n L = self.trainer.loss_function(source_input, solution)\n c = torch.autograd.grad(\n L, solver.para, create_graph=True, retain_graph=True)[0]\n\n dcdx = torch.stack([torch.autograd.grad(\n c[i], solver.para, create_graph=True, retain_graph=True)[0] for i in range(len(c))])\n dcdy = torch.stack([torch.autograd.grad(\n c[i], source, create_graph=True, retain_graph=True)[0] for i in range(len(c))])\n\n return dcdx.data.numpy(), dcdy.data.numpy()\n\n\n'''Helpers'''\n\n\ndef heart_shape():\n 
def x_para(t):\n return 16 * np.sin(t)**3\n\n def y_para(t):\n return 13 * np.cos(t) - 5 * np.cos(2 * t) - 2 * np.cos(3 * t) - np.cos(4 * t) - 5\n vertical_dist = 2\n norm_factor = vertical_dist / (y_para(0) - y_para(np.pi))\n t = np.linspace(0, 2 * np.pi, 31)\n x = norm_factor * x_para(t)\n y = norm_factor * y_para(t)\n return np.asarray([x, y])\n\n\ndef circle_shape():\n t = np.linspace(0, np.pi, 4)\n x = 2 * np.cos(t - np.pi / 2.)\n y = 2 * np.sin(t - np.pi / 2.)\n return np.asarray([x, y])\n\n\ndef run_mixed_opt(alpha_nn,\n alpha_ad,\n maxiter_nn,\n maxiter_ad,\n log_interval_nn,\n log_interval_ad,\n optimizer_nn,\n optimizer_ad\n ):\n x_opt, wall_time_nn, objective_nn, source_nn = optimizer_nn.optimize(alpha=alpha_nn,\n x_initial=None,\n maxiter=maxiter_nn,\n log_interval=log_interval_nn)\n solver = RobotSolver(optimizer_nn.args, optimizer_nn.trainer.graph_info)\n solver.reset_parameters_network(torch.tensor(\n x_opt, dtype=torch.float).unsqueeze(0), optimizer_nn.model)\n para_data = solver.para.data\n optimizer_ad.para_data = para_data\n x_opt, wall_time_ad, objective_ad, source_ad = optimizer_ad.optimize(alpha=alpha_ad,\n x_initial=x_opt,\n maxiter=maxiter_ad,\n log_interval=log_interval_ad)\n\n wall_time_mix = np.concatenate(\n (wall_time_nn, wall_time_ad[1:] + wall_time_nn[-1]))\n objective_mix = np.concatenate((objective_nn, objective_ad[1:]))\n source_mix = np.concatenate((source_nn, source_ad[1:]))\n return x_opt, wall_time_mix, objective_mix, source_mix\n\n\ndef run_single_opt(alpha,\n maxiter,\n log_interval,\n optimizer\n ):\n x_opt, wall_time, objective, source = optimizer.optimize(alpha=alpha,\n x_initial=None,\n maxiter=maxiter,\n log_interval=log_interval)\n\n return x_opt, wall_time, objective, source\n\n\ndef run_one_case(args,\n alpha_nn,\n alpha_ad1,\n alpha_ad2,\n maxiter_nn,\n maxiter_ad1,\n maxiter_ad2,\n log_interval_nn,\n log_interval_ad1,\n log_interval_ad2,\n target_point,\n case_number\n ):\n\n print(\"\\ncase number {}\".format(case_number))\n\n optimizer_nn = OptimizerRobotPointSurrogate(args)\n optimizer_nn.target_point = target_point\n optimizer_ad = OptimizerRobotPointAdjoint(args)\n optimizer_ad.target_point = target_point\n\n _, wall_time_ad, objective_ad, source_ad = run_single_opt(\n alpha_ad1, maxiter_ad1, log_interval_ad1, optimizer_ad)\n print(\"\\n\")\n _, wall_time_mix, objective_mix, source_mix = run_mixed_opt(alpha_nn,\n alpha_ad2,\n maxiter_nn,\n maxiter_ad2,\n log_interval_nn,\n log_interval_ad2,\n optimizer_nn,\n optimizer_ad)\n\n nn_number = maxiter_nn // log_interval_nn\n objective_mix[\n :nn_number + 1], _ = optimizer_nn.batch_evaluate(source_mix[:nn_number + 1])\n _, optimal_solution = optimizer_nn.evaluate(source_mix[-1])\n\n print(\"true error ad\", objective_ad[-1])\n print(\"true error mix\", objective_mix[-1])\n\n np.savez(args.root_path + '/' + args.numpy_path\n + '/robot/deploy/case' + str(case_number) + '.npz',\n wall_time_ad=wall_time_ad,\n objective_ad=objective_ad,\n nn_number=nn_number,\n wall_time_mix=wall_time_mix,\n objective_mix=objective_mix,\n target_point=target_point\n )\n scalar_field_paraview(args, optimal_solution,\n optimizer_nn.trainer.poisson, \"/robot/deploy/u\" + str(case_number))\n\n\ndef run_walltime(args):\n target_coos = circle_shape()\n\n alpha_ad1_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n alpha_nn_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n alpha_ad2_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n\n maxiter_ad1_list = [20, 20, 20, 20]\n maxiter_nn_list = [400, 400, 4000, 6000]\n maxiter_ad2_list = [20, 20, 20, 
20]\n\n log_interval_ad1_list = [1, 1, 1, 1]\n log_interval_nn_list = [40, 40, 400, 600]\n log_interval_ad2_list = [1, 1, 1, 1]\n\n for i in range(3, 4):\n run_one_case(args,\n alpha_nn_list[i],\n alpha_ad1_list[i],\n alpha_ad2_list[i],\n maxiter_nn_list[i],\n maxiter_ad1_list[i],\n maxiter_ad2_list[i],\n log_interval_nn_list[i],\n log_interval_ad1_list[i],\n log_interval_ad2_list[i],\n target_coos[:, i],\n i)\n\n\ndef run_step(args):\n target_coos = circle_shape()\n alpha_list = [1e-2, 1e-2, 2 * 1e-3, 2 * 1e-3]\n for case_number in range(2, 4):\n optimizer_nn = OptimizerRobotPointSurrogate(args)\n optimizer_ad = OptimizerRobotPointAdjoint(args)\n print(\"case_number\", case_number)\n target_point = target_coos[:, case_number]\n optimizer_nn.target_point = target_point\n optimizer_ad.target_point = target_point\n\n _, wall_time_ad, objective_ad, source_ad = run_single_opt(\n alpha_list[case_number], 100, 1, optimizer_ad)\n _, wall_time_nn, objective_nn, source_nn = run_single_opt(\n alpha_list[case_number], 100, 1, optimizer_nn)\n objective_nn, _ = optimizer_nn.batch_evaluate(source_nn)\n np.savez(args.root_path + '/' + args.numpy_path\n + '/robot/deploy/case_step' + str(case_number) + '.npz',\n objective_ad=objective_ad,\n objective_nn=objective_nn,\n target_point=target_point,\n wall_time_ad=wall_time_ad,\n wall_time_nn=wall_time_nn\n )\n\n\ndef run_gradient_free(args):\n target_coos = circle_shape()\n target_point = target_coos[:, 1]\n optimizer_fr = OptimizerRobotPointFree(args)\n optimizer_fr.target_point = target_point\n optimizer_fr.optimize()\n\n\nif __name__ == '__main__':\n args = arguments.args\n run_walltime(args)\n # run_step(args)\n" ]
[ [ "numpy.linalg.solve", "numpy.expand_dims", "numpy.linspace", "torch.load", "numpy.asarray", "numpy.matmul", "numpy.cos", "torch.tensor", "numpy.concatenate", "numpy.sin", "numpy.ones", "scipy.optimize.minimize", "numpy.array", "numpy.zeros", "torch.autograd.grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
alfrunesiq/SemanticSegmentationActiveLearning
[ "3f953a22c8fd95828c9bd4c5ce52a53e991391e4" ]
[ "active_learning.py" ]
[ "# Python standard libraries\nimport argparse\nimport glob\nimport json\nimport logging\nimport logging.config\nimport os\nimport sys\n\n# Non-standard includes\nimport numpy as np\nimport tensorflow as tf\n# Maybe import tqdm\nshow_progress = False\ntry:\n import tqdm\n show_progress = True\nexcept ImportError:\n pass\n\ntry:\n import tkinter\n tkinter.Tk().withdraw()\nexcept ImportError:\n if args.unlabelled == None:\n pass\n else:\n raise ImportError(\"Could not import tkinter, make sukre Tk \"\n \"dependencies are installed\")\nexcept Exception as e:\n print(e)\n pass\n\n# User includes\nimport models\nimport datasets\nimport tensortools as tt\n\n# Lowest representable float32\nEPSILON = np.finfo(np.float32).tiny\n\ndef main(args, logger):\n # Retrieve training parameters for convenience\n params = args.params # All parameters\n hparams = params[\"hyperparams\"] # Hyperparamters\n alparams = params[\"active_learning\"] # Active learning parameters\n state = None # State dict\n # Define state and config filenames\n state_filename = os.path.join(args.log_dir, \"state.json\")\n config_filename = os.path.join(args.log_dir, \"config.json\")\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n # Dump parameter config\n with open(config_filename, \"w+\") as f:\n json.dump(params, f, indent=4)\n\n # Retrieve dataset specific object\n if args.dataset == \"cityscapes\":\n dataset = datasets.Cityscapes(coarse=args.coarse)\n test_examples_glob = os.path.join(args.data_dir, \"val\", \"*.tfrecord\")\n elif args.dataset == \"freiburg\":\n dataset = datasets.Freiburg()\n test_examples_glob = os.path.join(args.data_dir, \"test\", \"*.tfrecord\")\n elif args.dataset == \"vistas\":\n dataset = datasets.Vistas()\n test_examples_glob = os.path.join(args.data_dir, \"val\", \"*.tfrecord\")\n else:\n raise NotImplementedError(\"Dataset \\\"%s\\\" not supported\" % args.dataset)\n\n # Prepare dataset example file paths.\n train_examples_glob = os.path.join(args.data_dir, \"train\", \"*.tfrecord\")\n\n if not os.path.exists(state_filename):\n # Initialize state\n # Resolve example filenames\n train_val_examples = np.sort(np.array(glob.glob(train_examples_glob)))\n # Pick examples from training set to use for validation\n val_examples = train_val_examples[:alparams[\"num_validation\"]]\n # Use the rest as training examples\n train_examples = train_val_examples[alparams[\"num_validation\"]:]\n\n # Use annotated test set, NOTE: cityscapes validation set\n test_examples = np.array(glob.glob(test_examples_glob))\n\n # Draw random train examples and mark as annotated\n train_indices = np.arange(len(train_examples), dtype=np.int32)\n np.random.shuffle(train_indices)\n\n initially_labelled = alparams[\"num_initially_labelled\"]\n if initially_labelled < 0:\n # Use rest of labelled examples\n initially_labelled = len(train_examples)\n\n # Possibly add actually unlabelled examples\n no_label_indices = np.empty(0, dtype=str)\n if args.unlabelled is not None:\n no_label_glob = os.path.join(args.unlabelled, \"*.tfrecord\")\n no_label_examples = glob.glob(no_label_glob)\n no_label_indices = np.arange(\n len(train_indices), len(train_indices)+len(no_label_examples)\n )\n train_examples = np.concatenate(train_examples,\n no_label_examples)\n train_indices = np.concatenate((train_indices, no_label_indices))\n\n labelled = train_indices[:initially_labelled]\n unlabelled = train_indices[initially_labelled:]\n del train_indices\n\n # Setup initial state\n state = {\n \"checkpoint\" : None, # Keep track of latest 
checkpoint.\n \"iteration\" : 0,\n \"dataset\" : {\n \"train\" : {\n \"filenames\" : list(train_examples),\n \"labelled\" : labelled.tolist(),\n \"unlabelled\" : unlabelled.tolist(),\n \"no_label\" : no_label_indices.tolist()\n },\n \"val\" : {\n \"filenames\" : list(val_examples)\n },\n \"test\" : {\n \"filenames\" : list(test_examples)\n }\n }\n }\n with open(state_filename, \"w+\") as f:\n json.dump(state, f, indent=2)\n\n else:\n # Load state\n with open(state_filename, \"r\") as f:\n state = json.load(f)\n # Extract filename properties\n train_examples = np.array(state[\"dataset\"][\"train\"][\"filenames\"])\n val_examples = np.array(state[\"dataset\"][\"val\"][\"filenames\"])\n test_examples = np.array(state[\"dataset\"][\"test\"][\"filenames\"])\n labelled = np.array(state[\"dataset\"][\"train\"][\"labelled\"])\n unlabelled = np.array(state[\"dataset\"][\"train\"][\"unlabelled\"])\n no_label_indices = np.array(state[\"dataset\"][\"train\"][\"no_label\"])\n\n train_input_labelled = np.full_like(train_examples, False, dtype=bool)\n train_input_labelled[labelled] = True\n train_input_indices = np.arange(len(train_examples))\n\n with tf.device(\"/device:CPU:0\"):\n with tf.name_scope(\"Datasets\"):\n # Create input placeholders\n train_input = tt.input.NumpyCapsule()\n train_input.filenames = train_examples\n train_input.labelled = train_input_labelled\n train_input.indices = train_input_indices\n\n val_input = tt.input.NumpyCapsule()\n val_input.filenames = val_examples\n test_input = tt.input.NumpyCapsule()\n test_input.filenames = test_examples\n\n # Setup input pipelines\n train_input_stage = tt.input.InputStage(\n input_shape=[params[\"network\"][\"input\"][\"height\"],\n params[\"network\"][\"input\"][\"width\"]])\n # Validation AND Test input stage\n val_input_stage = tt.input.InputStage(\n input_shape=[params[\"network\"][\"input\"][\"height\"],\n params[\"network\"][\"input\"][\"width\"]])\n\n # Add datasets\n train_input_stage.add_dataset_from_placeholders(\n \"train\", train_input.filenames,\n train_input.labelled, train_input.indices,\n batch_size=params[\"batch_size\"],\n augment=True)\n # Validation set\n val_input_stage.add_dataset_from_placeholders(\n \"val\", val_input.filenames,\n batch_size=params[\"batch_size\"])\n # Test set\n val_input_stage.add_dataset_from_placeholders(\n \"test\", test_input.filenames,\n batch_size=params[\"batch_size\"])\n # Calculate number of batches in each iterator\n val_batches = (len(val_examples) - 1)//params[\"batch_size\"] + 1\n test_batches = (len(test_examples) - 1)//params[\"batch_size\"] + 1\n\n # Get iterator outputs\n train_image_raw, train_image, train_label, train_mask, \\\n train_labelled, train_index = train_input_stage.get_output()\n val_image, val_label, val_mask = val_input_stage.get_output()\n\n # Create step variables\n with tf.variable_scope(\"StepCounters\"):\n global_step = tf.Variable(0, dtype=tf.int64,\n trainable=False, name=\"GlobalStep\")\n local_step = tf.Variable(0, dtype=tf.int64,\n trainable=False, name=\"LocalStep\")\n global_step_op = tf.assign_add(global_step, local_step)\n epoch_step = tf.Variable(0, trainable=False, name=\"EpochStep\")\n epoch_step_inc = tf.assign_add(epoch_step, 1)\n\n # Build training- and validation network\n regularization = {\"drop_rates\": hparams[\"dropout_rates\"]}\n if hparams[\"weight_reg\"][\"L2\"] > 0.0 \\\n or hparams[\"weight_reg\"][\"L1\"] > 0.0:\n regularization = {\n \"weight_regularization\" : tf.keras.regularizers.l1_l2(\n l1=hparams[\"weight_reg\"][\"L1\"],\n 
l2=hparams[\"weight_reg\"][\"L2\"]),\n \"regularization_scaling\" : hparams[\"weight_reg\"][\"glorot_scaling\"],\n }\n\n # Initialize networks\n train_net = models.ENet(\n dataset.num_classes,\n **regularization\n )\n val_net = models.ENet(dataset.num_classes)\n\n with tf.device(\"/device:GPU:0\"):\n # Build graph for training\n train_logits = train_net(train_image, training=True)\n # Compute predictions: use @train_pred for metrics and\n # @pseudo_label for pseudo_annotation process.\n train_pred = tf.math.argmax(train_logits, axis=-1,\n name=\"TrainPredictions\")\n\n with tf.name_scope(\"PseudoAnnotation\"):\n # Build ops one more time without dropout.\n pseudo_logits = train_net(train_image_raw, training=False)\n # Just make sure not to propagate gradients a second time.\n pseudo_logits = tf.stop_gradient(pseudo_logits)\n pseudo_label = tf.math.argmax(pseudo_logits, axis=-1,\n name=\"TrainPredictions\")\n pseudo_label = tf.cast(pseudo_label, tf.uint8)\n\n # Configure on-line high confidence pseudo labeling.\n pseudo_prob = tf.nn.softmax(pseudo_logits, axis=-1, name=\"TrainProb\")\n if alparams[\"measure\"] == \"entropy\":\n # Reduce entropy over last dimension.\n # Compute prediction entropy\n entropy = - pseudo_prob * tf.math.log(pseudo_prob+EPSILON)\n entropy = tf.math.reduce_sum(entropy, axis=-1)\n # Convert logarithm base to units of number of classes\n # NOTE this will make the metric independent of number of\n # classes as well the range in [0,1]\n log_base = tf.math.log(np.float32(dataset.num_classes))\n entropy = entropy / log_base\n # Convert entropy to confidence\n pseudo_confidence = 1.0 - entropy\n elif alparams[\"measure\"] == \"margin\":\n # Difference between the two largest entries in last dimension.\n values, indices = tf.math.top_k(pseudo_prob, k=2)\n pseudo_confidence = values[:,:,:,0] - values[:,:,:,1]\n elif alparams[\"measure\"] == \"confidence\":\n # Reduce max over last dimension.\n pseudo_confidence = tf.math.reduce_max(pseudo_prob, axis=-1)\n else:\n raise NotImplementedError(\"Uncertainty function not implemented.\")\n pseudo_mean_confidence = tf.reduce_mean(\n tf.cast(pseudo_confidence, tf.float64),\n axis=(1,2))\n # Pseudo annotate high-confidence unlabeled example pixels\n pseudo_mask = tf.where(tf.math.less(pseudo_confidence, alparams[\"threshold\"]),\n tf.zeros_like(pseudo_label,\n dtype=train_label.dtype),\n tf.ones_like(pseudo_label,\n dtype=train_label.dtype))\n # Pseudo annotation logic (think of it as @tf.cond maped \n # over batch dimension)\n train_label = tf.where(train_labelled, train_label,\n pseudo_label, name=\"MaybeGenLabel\")\n train_mask = tf.where(train_labelled, train_mask,\n pseudo_mask, name=\"MaybeGenMask\")\n\n with tf.device(\"/device:GPU:1\"):\n # Build validation network.\n val_logits = val_net(val_image, training=False)\n val_pred = tf.math.argmax(val_logits, axis=-1,\n name=\"ValidationPredictions\")\n\n # Build cost function\n with tf.name_scope(\"Cost\"):\n with tf.device(\"/device:GPU:0\"):\n # Establish loss function\n if hparams[\"softmax\"][\"multiscale\"]:\n loss, loss_weights = \\\n tt.losses.multiscale_masked_softmax_cross_entropy(\n train_label,\n train_net.endpoint_outputs[0],\n train_mask, dataset.num_classes,\n weight=hparams[\"softmax\"][\"loginverse_scaling\"],\n label_smoothing=hparams[\"softmax\"][\"label_smoothing\"],\n scope=\"XEntropy\")\n # NOTE: this will make @loss_weights checkpointed\n train_net.loss_scale_weights = loss_weights\n else:\n loss = tt.losses.masked_softmax_cross_entropy(\n train_label,\n 
train_logits,\n train_mask, dataset.num_classes,\n weight=hparams[\"softmax\"][\"loginverse_scaling\"],\n label_smoothing=hparams[\"softmax\"][\"label_smoothing\"],\n scope=\"XEntropy\")\n cost = loss\n # Add regularization to cost function\n if len(train_net.losses) > 0:\n regularization_loss = tf.math.add_n(train_net.losses, name=\"Regularization\")\n cost += tf.cast(regularization_loss, dtype=tf.float64)\n\n # Setup learning rate\n learning_rate = hparams[\"learning_rate\"]\n if hparams[\"learning_rate_decay\"] > 0.0:\n # Inverse time learning_rate if lr_decay specified\n learning_rate = tf.train.inverse_time_decay(\n learning_rate, local_step,\n decay_steps=train_batches,\n decay_rate=hparams[\"learning_rate_decay\"])\n\n # Create optimization procedure\n optimizer = tf.train.AdamOptimizer(learning_rate, **hparams[\"optimizer\"][\"kwargs\"])\n\n # Create training op\n train_op = optimizer.minimize(cost, global_step=local_step,\n name=\"TrainOp\")\n # END tf.device(\"/device:GPU:0\")\n # END tf.name_scope(\"Cost\")\n\n # Create summary operations for training and validation network\n with tf.name_scope(\"Summary\"):\n # Create colormap for image summaries\n colormap = tf.constant(dataset.colormap, dtype=tf.uint8,\n name=\"Colormap\")\n # Create metric evaluation and summaries\n with tf.device(\"/device:GPU:0\"):\n with tf.name_scope(\"TrainMetrics\"):\n # Create metrics object for training network.\n train_metrics = tt.metrics.Metrics(train_pred, train_label,\n dataset.num_classes, train_mask)\n # Get Tensorflow update op.\n metric_update_op = train_metrics.get_update_op()\n # Get Tensorflow summary operations.\n metric_summaries = train_metrics.get_summaries()\n\n train_summary_iter = tf.summary.merge(\n [\n # Summaries run at each iteration.\n tf.summary.scalar(\"CrossEntropyLoss\", loss,\n family=\"Losses\"),\n tf.summary.scalar(\"TotalCost\", cost,\n family=\"Losses\"),\n tf.summary.scalar(\"LearningRate\", learning_rate,\n family=\"Losses\")\n ], name=\"IterationSummaries\"\n )\n\n with tf.control_dependencies([metric_update_op]):\n train_summary_epoch = tf.summary.merge(\n [\n # Summaries run at epoch boundaries.\n metric_summaries[\"Metrics\"],\n metric_summaries[\"ConfusionMat\"]\n ], name=\"EpochSummaries\"\n )\n\n train_image_summary = tf.summary.merge(\n [\n tf.summary.image(\n \"PseudoLabel/input\",\n train_image_raw,\n family=\"PseudoLabel\"\n ),\n tf.summary.image(\n \"PseudoLabel/confidence\",\n tf.expand_dims(pseudo_confidence, axis=-1),\n family=\"PseudoLabel\"\n ),\n tf.summary.image(\n \"PseudoLabel\", \n tf.gather(dataset.colormap,\n tf.cast(pseudo_label*pseudo_mask \\\n + (1 - pseudo_mask)*255,\n tf.int32)),\n family=\"PseudoLabel\"\n )\n ]\n )\n # Create metric evaluation and summaries\n with tf.device(\"/device:GPU:1\"):\n with tf.name_scope(\"ValidationTestMetrics\"):\n # Create metrics object\n val_metrics = tt.metrics.Metrics(val_pred, val_label,\n dataset.num_classes, val_mask)\n # Get update tensorflow ops\n val_metric_update_op = val_metrics.get_update_op()\n # Get metric sumaries\n val_metric_summaries = val_metrics.get_summaries()\n\n with tf.control_dependencies([val_metric_update_op]):\n val_metric_summary = tf.summary.merge(\n [\n # \"Expensive\" summaries run at epoch boundaries.\n val_metric_summaries[\"Metrics\"],\n val_metric_summaries[\"ClassMetrics\"],\n val_metric_summaries[\"ConfusionMat\"]\n ], name=\"EpochSummaries\"\n )\n val_image_summary = tf.summary.merge(\n [\n tf.summary.image(\"Input\", val_image),\n tf.summary.image(\"Label\", 
tf.gather(\n colormap, tf.cast(val_label + 255*(1-val_mask),\n tf.int32))),\n tf.summary.image(\"Predictions\", tf.gather(\n colormap, tf.cast(val_pred, tf.int32)))\n ]\n )\n val_summary_epoch = val_metric_summary\n test_summary_epoch = tf.summary.merge([\n val_metric_summary,\n val_image_summary\n ]\n )\n conf_summary_ph = tf.placeholder(tf.float64, shape=[None])\n conf_summary = tf.summary.histogram(\"ConfidenceDistribution\",\n conf_summary_ph)\n # END name_scope(\"Summary\")\n\n # Create session with soft device placement\n # - some ops neet to run on the CPU\n sess_config = tf.ConfigProto(allow_soft_placement=True)\n sess_config.gpu_options.allow_growth = True\n with tf.Session(config=sess_config) as sess:\n logger.debug(\"Initializing variables...\")\n sess.run(tf.global_variables_initializer())\n\n\n # Create checkpoint object\n with tf.name_scope(\"Checkpoint\"):\n checkpoint = tf.train.Checkpoint(model=train_net,\n epoch=epoch_step,\n step=global_step,\n optimizer=optimizer)\n checkpoint_name = os.path.join(args.log_dir, \"model\")\n if args.checkpoint is not None:\n # CMDline checkpoint given\n ckpt = args.checkpoint\n if os.path.isdir(ckpt):\n ckpt = tf.train.latest_checkpoint(ckpt)\n if ckpt is None:\n logger.error(\"Checkpoint path \\\"%s\\\" is invalid.\")\n return 1\n logger.info(\"Resuming from checkpoint \\\"%s\\\"\" % ckpt)\n status = checkpoint.restore(ckpt)\n if tf.__version__ < \"1.14.0\":\n status.assert_existing_objects_matched()\n else:\n status.expect_partial()\n status.initialize_or_restore(sess)\n if args.reinitialize_output:\n sess.run(train_net.Final.kernel.initializer)\n\n elif state[\"checkpoint\"] != None:\n # Try to restore from checkpoint in logdir\n ckpt = state[\"checkpoint\"]\n logger.info(\"Resuming from checkpoint \\\"%s\\\"\" % ckpt)\n status = checkpoint.restore(ckpt)\n if tf.__version__ < \"1.14.0\":\n status.assert_existing_objects_matched()\n else:\n status.expect_partial()\n status.initialize_or_restore(sess)\n\n with tf.name_scope(\"UpdateValidationWeights\"):\n update_val_op = []\n for i in range(len(val_net.layers)):\n for j in range(len(val_net.layers[i].variables)):\n update_val_op.append(\n tf.assign(val_net.layers[i].variables[j],\n train_net.layers[i].variables[j]))\n update_val_op = tf.group(update_val_op)\n\n ckpt_manager = tt.checkpoint_manager.CheckpointManager(checkpoint,\n args.log_dir)\n # END scope Checkpoint\n # Prepare global fetches dict\n fetches = {\n \"train\" : {\n \"iteration\" : {\n \"step\" : global_step_op,\n \"summary\" : train_summary_iter,\n \"train_op\" : train_op,\n \"update\" : metric_update_op,\n \"updates\" : train_net.updates\n },\n \"epoch\" : {\n \"step\" : epoch_step,\n \"summary\" : train_summary_epoch,\n \"summary/image\" : train_image_summary\n }\n },\n \"val\" : { # Validation and test fetches\n \"iteration\" : {\n \"update\" : val_metric_update_op\n },\n \"epoch\" : {\n \"step\" : epoch_step,\n \"MeanIoU\" : val_metrics.metrics[\"MeanIoU\"],\n \"summary\" : val_summary_epoch,\n # Also add image summary, however only added to\n # writer every N epochs.\n \"summary/image\" : val_image_summary\n }\n },\n \"test\" : {\n \"iteration\" : {\"update\" : val_metric_update_op},\n \"epoch\" : {\"summary\" : test_summary_epoch}\n }\n }\n\n # Train loop (until convergence) -> Pick unlabeled examples -> test_loop\n def train_loop(summary_writer):\n \"\"\"\n Train loop closure.\n Runs training loop untill no improvement is seen in\n @params[\"epochs\"] epochs before returning.\n \"\"\"\n # How many epoch until 
counting @no_improvement\n _initial_grace_period = alparams[\"epochs/warm_up\"]\n best_ckpt = state[\"checkpoint\"]\n best_mean_iou = 0.0\n log_subdir = summary_writer.get_logdir()\n run_name = os.path.basename(log_subdir)\n checkpoint_prefix = os.path.join(log_subdir, \"model\")\n num_iter_per_epoch = np.maximum(train_input.size,\n val_input.size)\n no_improvement_count = 0\n while no_improvement_count < params[\"epochs\"] \\\n or _initial_grace_period >= 0:\n _initial_grace_period -= 1\n # Increment in-graph epoch counter.\n epoch = sess.run(epoch_step_inc)\n\n # Prepare inner loop iterator\n _iter = range(0, num_iter_per_epoch, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"%s[%d]\" % (run_name, epoch),\n dynamic_ncols=True,\n ascii=True,\n postfix={\"NIC\": no_improvement_count})\n\n # Initialize iterators\n train_input_stage.init_iterator(\n \"train\", sess, train_input.feed_dict)\n val_input_stage.init_iterator(\n \"val\", sess, val_input.feed_dict)\n\n # Reset confusion matrices\n train_metrics.reset_metrics(sess)\n val_metrics.reset_metrics(sess)\n\n # Prepare iteration fetches\n _fetches = {\n \"train\" : {\"iteration\" : fetches[\"train\"][\"iteration\"]},\n \"val\" : {\"iteration\" : fetches[\"val\"][\"iteration\"]}\n }\n # Update validation network weights\n sess.run(update_val_op)\n\n try:\n for i in _iter:\n if train_input.size-params[\"batch_size\"] <= i < train_input.size:\n # Fetches for last training iteration.\n _fetches[\"train\"][\"epoch\"] = fetches[\"train\"][\"epoch\"]\n if val_input.size-params[\"batch_size\"] <= i < val_input.size:\n _fetches[\"val\"][\"epoch\"] = fetches[\"val\"][\"epoch\"]\n\n # Run fetches\n results = sess.run(_fetches)\n\n if \"train\" in results.keys():\n # Add iteration summary\n summary_writer.add_summary(\n results[\"train\"][\"iteration\"][\"summary\"],\n results[\"train\"][\"iteration\"][\"step\"])\n\n # Maybe add epoch summary\n if \"epoch\" in results[\"train\"].keys():\n summary_writer.add_summary(\n results[\"train\"][\"epoch\"][\"summary\"],\n results[\"train\"][\"epoch\"][\"step\"]\n )\n # Pop fetches to prohibit OutOfRangeError due to\n # asymmetric train-/val- input size.\n if results[\"train\"][\"epoch\"][\"step\"] % 100 == 0:\n summary_writer.add_summary(\n results[\"train\"][\"epoch\"][\"summary/image\"],\n results[\"train\"][\"epoch\"][\"step\"]\n )\n _fetches.pop(\"train\")\n\n if \"val\" in results.keys() and \\\n \"epoch\" in results[\"val\"].keys():\n # Add summaries to event log.\n summary_writer.add_summary(\n results[\"val\"][\"epoch\"][\"summary\"],\n results[\"val\"][\"epoch\"][\"step\"]\n )\n if results[\"val\"][\"epoch\"][\"step\"] % 100 == 0:\n # Only report image summary every 100th epoch.\n summary_writer.add_summary(\n results[\"val\"][\"epoch\"][\"summary/image\"],\n results[\"val\"][\"epoch\"][\"step\"]\n )\n # Check if MeanIoU improved and\n # update counter and best\n if results[\"val\"][\"epoch\"][\"MeanIoU\"] > best_mean_iou:\n best_mean_iou = results[\"val\"][\"epoch\"][\"MeanIoU\"]\n # Update checkpoint file used for\n # @tf.train.latest_checkpoint to point at\n # current best.\n _ckpt_name = ckpt_manager.commit(\n checkpoint_prefix, sess)\n if _ckpt_name != \"\":\n best_ckpt = _ckpt_name\n # Reset counter\n no_improvement_count = 0\n else:\n # Result has not improved, increment counter.\n no_improvement_count += 1\n if no_improvement_count >= params[\"epochs\"] and \\\n _initial_grace_period < 0:\n _iter.close()\n break\n if show_progress:\n 
_iter.set_postfix(NIC=no_improvement_count)\n # Pop fetches to prohibit OutOfRangeError due to\n # asymmetric train-/val- input size.\n _fetches.pop(\"val\")\n # END \"maybe add epoch summary\"\n except tf.errors.OutOfRangeError:\n logger.error(\"Out of range error. Attempting to continue.\")\n pass\n\n summary_writer.flush()\n ckpt_manager.cache(sess)\n # END while no_improvement_count < params[\"epochs\"]\n return best_ckpt\n\n def test_loop(summary_writer):\n \"\"\"\n Test loop closure.\n \"\"\"\n _step = len(labelled)\n # Initialize validation input stage with test set\n val_input_stage.init_iterator(\"test\", sess, test_input.feed_dict)\n _iter = range(0, test_input.size, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"test[%d]\" % (_step),\n ascii=True,\n dynamic_ncols=True)\n summary_proto = None\n val_metrics.reset_metrics(sess)\n try:\n for i in _iter:\n # Accumulate confusion matrix\n if i < test_input.size - params[\"batch_size\"]:\n sess.run(fetches[\"test\"][\"iteration\"][\"update\"])\n else:\n # Run summary operation last iteration\n _, summary_proto = sess.run([fetches[\"test\"][\"iteration\"][\"update\"],\n fetches[\"test\"][\"epoch\"][\"summary\"]])\n except tf.errors.OutOfRangeError:\n pass\n # Add summary with number of labelled examples as step.\n # NOTE this only runs on each major iteration.\n summary_writer.add_summary(\n summary_proto, _step\n )\n\n def rank_confidence():\n # Allocate array to store all confidence scores\n num_examples = len(state[\"dataset\"][\"train\"][\"filenames\"])\n confidence = np.zeros(num_examples, dtype=np.float32)\n # Initialize input stage\n train_input_stage.init_iterator(\"train\", sess,\n train_input.feed_dict)\n _iter = range(0, train_input.size, params[\"batch_size\"])\n if show_progress:\n _iter = tqdm.tqdm(_iter, desc=\"ranking[%d]\" % len(labelled),\n ascii=True,\n dynamic_ncols=True)\n try:\n for i in _iter:\n # Loop over all examples and compute confidence\n batch_confidence, batch_indices = sess.run(\n [pseudo_mean_confidence, train_index])\n # Add to list of confidence\n confidence[batch_indices] = batch_confidence\n except tf.errors.OutOfRangeError:\n pass\n\n # Filter out labelled examples\n unlabelled_confidence = confidence[unlabelled]\n\n selection_size = np.minimum(len(unlabelled),\n alparams[\"selection_size\"])\n # Get the lowest confidence indices of unlabelled subset\n example_indices = np.argpartition(unlabelled_confidence,\n selection_size)\n example_indices = example_indices[:selection_size]\n # Convert to indices into all filenames list\n low_conf_examples = unlabelled[example_indices]\n return low_conf_examples, unlabelled_confidence\n\n checkpoint_path = state[\"checkpoint\"]\n # Only add graph to first event file\n _graph = sess.graph if checkpoint_path == None else None\n with tf.summary.FileWriter(args.log_dir, graph=_graph) as test_writer:\n iterations = alparams[\"iterations\"]\n if iterations < 0:\n # Iterate untill all data is consumed\n iterations = np.ceil(len(unlabelled)\n / float(alparams[\"selection_size\"]))\n logger.info(\"Iteration count: %d\" % iterations)\n\n while state[\"iteration\"] < iterations:\n # Step 1: train_loop\n train_input.set_indices(labelled)\n\n if state[\"iteration\"] == 0:\n # Pretrain\n log_subdir = os.path.join(args.log_dir, \"pretrain\")\n # Only use labelled subset\n else:\n # Any other iteration\n log_subdir = os.path.join(args.log_dir, \"iter-%d\" %\n state[\"iteration\"])\n # Sample from the unlabelled set\n p = 
alparams[\"pseudo_labelling_proportion\"]\n sample_size = int(len(labelled)*p/(1-p))\n sample_size = np.minimum(sample_size, len(unlabelled))\n train_input.set_sample_size(sample_size)\n\n # Create subdir if it doesn't exist\n if not os.path.exists(log_subdir):\n os.mkdir(log_subdir)\n\n # Change checkpoint manager directory\n ckpt_manager.chdir(log_subdir)\n with tf.summary.FileWriter(log_subdir) as train_val_writer:\n # Enter train loop\n try:\n checkpoint_path = train_loop(train_val_writer)\n except KeyboardInterrupt as exception:\n # Quickly store state\n if ckpt_manager.latest_checkpoint != \"\":\n state[\"checkpoint\"] = ckpt_manager.latest_checkpoint\n with open(state_filename, \"w\") as f:\n json.dump(state, f, indent=2)\n f.truncate()\n raise exception\n\n\n # Reload best checkpoint\n status = checkpoint.restore(checkpoint_path)\n status.run_restore_ops(sess)\n sess.run(update_val_op)\n\n # Step 2: test_loop\n if test_input.size > 0:\n # This step may be omitted on deployment\n test_loop(test_writer)\n\n # Step 3: Find low confidence examples\n # Reset train_input to use all examples for ranking\n train_input.set_indices()\n if alparams[\"selection_size\"] > 0:\n low_conf_examples, unlabelled_conf = rank_confidence()\n _hist_summary = sess.run(conf_summary,\n {conf_summary_ph: \n unlabelled_conf})\n test_writer.add_summary(_hist_summary, state[\"iteration\"])\n else:\n # Draw examples randomly\n selection_size = np.minimum(alparams[\"selection_size\"],\n len(unlabelled.tolist()))\n if selection_size != 0:\n low_conf_examples = np.random.choice(\n unlabelled, np.abs(alparams[\"selection_size\"]))\n else:\n low_conf_examples = []\n\n # (maybe) Pause for user to annotate\n to_annotate_indices = no_label_indices[np.isin(\n no_label_indices, low_conf_examples)]\n\n while len(to_annotate_indices) > 0:\n to_annotate = train_examples[to_annotate_indices]\n # Poll user for filenames of annotated examples\n logger.info(\"Please annotate the following examples:\\n%s\" %\n \"\\n\".join(to_annotate_basename.tolist()))\n filenames = tkinter.filedialog.askopenfilename(\n multiple=1,\n filetypes=((\"TFRecord\", \"*.tfrecord\"),))\n\n hit = [] # List of matching filename indices\n for filename in filenames:\n basename = os.path.basename(filename)\n idx = -1\n for i in range(len(to_annotate)):\n if to_annotate[i].endswith(basename):\n idx = i\n break\n if idx != -1:\n # Update state filenames\n train_examples[to_annotate_indices[idx]] = filename\n hit.append(idx)\n else:\n logger.info(\"Unrecognized filepath: %s\" % filename)\n # Remove matched paths\n to_annotate_indices = np.delete(to_annotate_indices, hit)\n\n\n # Remove annotated examples from unlabelled set\n no_label_indices = no_label_indices[np.isin(no_label_indices,\n low_conf_examples,\n invert=True)]\n\n\n logger.info(\n \"Moving following examples to labelled set:\\n%s\" %\n \"\\n\".join(train_examples[low_conf_examples].tolist())\n )\n # First make the update to input stage before\n # commiting state change\n train_input_labelled[low_conf_examples] = True\n train_input.labelled = train_input_labelled\n\n\n # Step 4: Update state information\n labelled = np.append(labelled, low_conf_examples)\n unlabelled = unlabelled[np.isin(unlabelled, low_conf_examples,\n assume_unique=True, invert=True)]\n state[\"dataset\"][\"train\"][\"filenames\"] = train_examples.tolist()\n state[\"dataset\"][\"train\"][\"labelled\"] = labelled.tolist()\n state[\"dataset\"][\"train\"][\"unlabelled\"] = unlabelled.tolist()\n state[\"iteration\"] += 1\n 
state[\"checkpoint\"] = checkpoint_path\n # Dump updated state\n with open(state_filename, \"w\") as f:\n json.dump(state, f, indent=2)\n f.truncate()\n return 0\n\nclass HelpfullParser(argparse.ArgumentParser):\n # Prints help instead of usage string on error\n def error(self, message):\n self.print_help()\n self.exit(2, \"error: %s\\n\" % message)\n\ndef parse_arguments():\n \"\"\"\n Handles parseing of commandline arguments\n\n :returns: The parsed commandline options\n :rtype: argparse.Namespace\n \"\"\"\n # Required arguments\n req_parser = argparse.ArgumentParser(add_help=False)\n req_group = req_parser.add_argument_group(title=\"Required arguments\")\n req_group.add_argument(\n \"-d\", \"--data-dir\",\n required=True,\n type=str,\n dest=\"data_dir\",\n help=\"Path to dataset root directory\"\n )\n req_group.add_argument(\n \"-l\", \"--log-dir\",\n required=True,\n type=str,\n dest=\"log_dir\",\n metavar=\"LOG_DIR\",\n help=\"Logdirectory for the session.\"\n )\n req_group.add_argument(\n \"-p\", \"--parameters\",\n required=True,\n type=str,\n dest=\"params\",\n metavar=\"PARAM_FILE\",\n help=\"Path to parameter configuration file, see conf subdirectory.\"\n )\n #Optional arguments\n opt_parser = argparse.ArgumentParser(add_help=False)\n opt_parser.add_argument(\n \"-c\", \"--checkpoint\",\n type=str,\n dest=\"checkpoint\", required=False,\n metavar=\"CHECKPOINT\",\n help=\"Path to pretrained checkpoint directory or model.\"\n )\n opt_parser.add_argument(\n \"-r\", \"--reinitialize-output-layer\",\n action=\"store_true\",\n dest=\"reinitialize_output\", required=False,\n help=\"Reinitialize last layer of model (if checkpoint specified).\"\n )\n opt_parser.add_argument(\n \"-u\", \"--unlabelled-dir\",\n type=str,\n default=None,\n dest=\"unlabelled\",\n metavar=\"UNLABELLED_GLOB\",\n help=\"Path to directory containing only feature data.\"\n )\n\n # Create parser hierarchy\n # Top parser\n top_parser = argparse.ArgumentParser(\n usage=\"%s {cityscapes,freiburg,vistas} [-h/--help]\"\n % sys.argv[0])\n\n # Dataset specific parsers inherits required arguments.\n data_parsers = top_parser.add_subparsers(parser_class=HelpfullParser)\n # Cityscapes dataset\n cityscapes = data_parsers.add_parser(\n \"cityscapes\",\n usage=\"%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Cityscapes dataset.\")\n cityscapes.set_defaults(dataset=\"cityscapes\")\n cityscapes.add_argument(\"--use-coarse\",\n action=\"store_true\",\n required=False,\n dest=\"coarse\")\n # Mapillary Vistas dataset\n vistas = data_parsers.add_parser(\n \"vistas\",\n usage=\"%s {cityscapes,freiburg,vistas} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Mapillary Vistas dataset.\")\n vistas.set_defaults(dataset=\"vistas\")\n\n # Freiburg forrest dataset\n freiburg = data_parsers.add_parser(\n \"freiburg\",\n usage=\"%s {cityscapes,freiburg} -d DATA_DIR -l LOG_DIR [options]\"\n % sys.argv[0],\n parents=[req_parser,opt_parser],\n conflict_handler=\"resolve\",\n help=\"The Freiburg Forest dataset.\")\n freiburg.set_defaults(dataset=\"freiburg\")\n freiburg.add_argument(\"-m\", \"--modalities\",\n type=str,\n nargs=\"+\",\n required=False,\n default=[],\n help=\"Path to Freiburg Forest root directory.\")\n if not \"freiburg\" in sys.argv and \\\n not \"cityscapes\" in sys.argv and \\\n not \"vistas\" in sys.argv:\n top_parser.print_help()\n 
sys.exit(0)\n args = top_parser.parse_args()\n\n return args\n\nif __name__ == \"__main__\":\n # Get and configure logger\n logger = logging.getLogger(__name__)\n with open(\"util/logging.json\") as conf:\n conf_dict = json.load(conf)\n logging.config.dictConfig(conf_dict)\n del conf_dict\n args = parse_arguments()\n # Load parameters\n parameters = None\n with open(args.params, \"r\") as f:\n parameters = json.load(f)\n # Overwrite with parameter dict\n args.params = parameters\n sys.exit(main(args, logger))\n" ]
[ [ "tensorflow.device", "tensorflow.control_dependencies", "tensorflow.cast", "numpy.concatenate", "tensorflow.where", "tensorflow.train.AdamOptimizer", "tensorflow.keras.regularizers.l1_l2", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.math.argmax", "tensorflow.assign_add", "tensorflow.Variable", "tensorflow.summary.image", "numpy.finfo", "tensorflow.ConfigProto", "tensorflow.stop_gradient", "tensorflow.math.reduce_sum", "tensorflow.name_scope", "tensorflow.Session", "numpy.argpartition", "numpy.float32", "numpy.zeros", "numpy.isin", "tensorflow.math.reduce_max", "tensorflow.train.Checkpoint", "tensorflow.placeholder", "numpy.full_like", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.math.add_n", "numpy.append", "numpy.delete", "numpy.array", "tensorflow.math.less", "tensorflow.summary.merge", "tensorflow.summary.histogram", "tensorflow.nn.softmax", "tensorflow.constant", "numpy.maximum", "tensorflow.summary.FileWriter", "tensorflow.train.latest_checkpoint", "numpy.abs", "tensorflow.assign", "tensorflow.ones_like", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.math.log", "tensorflow.variable_scope", "tensorflow.math.top_k", "tensorflow.train.inverse_time_decay", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
naveenarun/MolGAN
[ "c304707144ec9a4870390011aa73cdc7078a0e9d" ]
[ "utils/sparse_molecular_dataset.py" ]
[ "import pickle\nimport numpy as np\n\nfrom rdkit import Chem\n\nif __name__ == '__main__':\n from progress_bar import ProgressBar\nelse:\n from utils.progress_bar import ProgressBar\n\nfrom datetime import datetime\n\n\nclass SparseMolecularDataset():\n\n def load(self, filename, subset=1):\n\n with open(filename, 'rb') as f:\n self.__dict__.update(pickle.load(f))\n\n self.train_idx = np.random.choice(self.train_idx, int(len(self.train_idx) * subset), replace=False)\n self.validation_idx = np.random.choice(self.validation_idx, int(len(self.validation_idx) * subset),\n replace=False)\n self.test_idx = np.random.choice(self.test_idx, int(len(self.test_idx) * subset), replace=False)\n\n self.train_count = len(self.train_idx)\n self.validation_count = len(self.validation_idx)\n self.test_count = len(self.test_idx)\n\n self.__len = self.train_count + self.validation_count + self.test_count\n\n def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump(self.__dict__, f)\n\n def generate(self, filename, add_h=False, filters=lambda x: True, size=None, validation=0.1, test=0.1):\n self.log('Extracting {}..'.format(filename))\n\n if filename.endswith('.sdf'):\n self.data = list(filter(lambda x: x is not None, Chem.SDMolSupplier(filename)))\n elif filename.endswith('.smi'):\n self.data = [Chem.MolFromSmiles(line) for line in open(filename, 'r').readlines()]\n\n self.data = list(map(Chem.AddHs, self.data)) if add_h else self.data\n self.data = list(filter(filters, self.data))\n self.data = self.data[:size]\n\n self.log('Extracted {} out of {} molecules {}adding Hydrogen!'.format(len(self.data),\n len(Chem.SDMolSupplier(filename)),\n '' if add_h else 'not '))\n\n self._generate_encoders_decoders()\n self._generate_AX()\n\n # it contains the all the molecules stored as rdkit.Chem objects\n self.data = np.array(self.data)\n\n # it contains the all the molecules stored as SMILES strings\n self.smiles = np.array(self.smiles)\n\n # a (N, L) matrix where N is the length of the dataset and each L-dim vector contains the \n # indices corresponding to a SMILE sequences with padding wrt the max length of the longest \n # SMILES sequence in the dataset (see self._genS)\n self.data_S = np.stack(self.data_S)\n\n # a (N, 9, 9) tensor where N is the length of the dataset and each 9x9 matrix contains the \n # indices of the positions of the ones in the one-hot representation of the adjacency tensor\n # (see self._genA)\n self.data_A = np.stack(self.data_A)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # indices of the positions of the ones in the one-hot representation of the annotation matrix\n # (see self._genX)\n self.data_X = np.stack(self.data_X)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the \n # diagonal of the correspondent adjacency matrix\n self.data_D = np.stack(self.data_D)\n\n # a (N, F) matrix where N is the length of the dataset and each F vector contains features \n # of the correspondent molecule (see self._genF)\n self.data_F = np.stack(self.data_F)\n\n # a (N, 9) matrix where N is the length of the dataset and each 9-dim vector contains the\n # eigenvalues of the correspondent Laplacian matrix\n self.data_Le = np.stack(self.data_Le)\n\n # a (N, 9, 9) matrix where N is the length of the dataset and each 9x9 matrix contains the \n # eigenvectors of the correspondent Laplacian matrix\n self.data_Lv = np.stack(self.data_Lv) \n\n self.vertexes = self.data_F.shape[-2]\n self.features = 
self.data_F.shape[-1]\n\n self._generate_train_validation_test(validation, test)\n\n def _generate_encoders_decoders(self):\n self.log('Creating atoms encoder and decoder..')\n atom_labels = sorted(set([atom.GetAtomicNum() for mol in self.data for atom in mol.GetAtoms()] + [0]))\n self.atom_encoder_m = {l: i for i, l in enumerate(atom_labels)}\n self.atom_decoder_m = {i: l for i, l in enumerate(atom_labels)}\n self.atom_num_types = len(atom_labels)\n self.log('Created atoms encoder and decoder with {} atom types and 1 PAD symbol!'.format(\n self.atom_num_types - 1))\n\n self.log('Creating bonds encoder and decoder..')\n bond_labels = [Chem.rdchem.BondType.ZERO] + list(sorted(set(bond.GetBondType()\n for mol in self.data\n for bond in mol.GetBonds())))\n\n self.bond_encoder_m = {l: i for i, l in enumerate(bond_labels)}\n self.bond_decoder_m = {i: l for i, l in enumerate(bond_labels)}\n self.bond_num_types = len(bond_labels)\n self.log('Created bonds encoder and decoder with {} bond types and 1 PAD symbol!'.format(\n self.bond_num_types - 1))\n\n self.log('Creating SMILES encoder and decoder..')\n smiles_labels = ['E'] + list(set(c for mol in self.data for c in Chem.MolToSmiles(mol)))\n self.smiles_encoder_m = {l: i for i, l in enumerate(smiles_labels)}\n self.smiles_decoder_m = {i: l for i, l in enumerate(smiles_labels)}\n self.smiles_num_types = len(smiles_labels)\n self.log('Created SMILES encoder and decoder with {} types and 1 PAD symbol!'.format(\n self.smiles_num_types - 1))\n\n def _generate_AX(self):\n self.log('Creating features and adjacency matrices..')\n pr = ProgressBar(60, len(self.data))\n\n data = []\n smiles = []\n data_S = []\n data_A = []\n data_X = []\n data_D = []\n data_F = []\n data_Le = []\n data_Lv = []\n\n max_length = max(mol.GetNumAtoms() for mol in self.data)\n max_length_s = max(len(Chem.MolToSmiles(mol)) for mol in self.data)\n\n for i, mol in enumerate(self.data):\n A = self._genA(mol, connected=True, max_length=max_length)\n D = np.count_nonzero(A, -1)\n if A is not None:\n data.append(mol)\n smiles.append(Chem.MolToSmiles(mol))\n data_S.append(self._genS(mol, max_length=max_length_s))\n data_A.append(A)\n data_X.append(self._genX(mol, max_length=max_length))\n data_D.append(D)\n data_F.append(self._genF(mol, max_length=max_length))\n\n L = D - A\n Le, Lv = np.linalg.eigh(L)\n\n data_Le.append(Le)\n data_Lv.append(Lv)\n\n pr.update(i + 1)\n\n self.log(date=False)\n self.log('Created {} features and adjacency matrices out of {} molecules!'.format(len(data),\n len(self.data)))\n\n self.data = data\n self.smiles = smiles\n self.data_S = data_S\n self.data_A = data_A\n self.data_X = data_X\n self.data_D = data_D\n self.data_F = data_F\n self.data_Le = data_Le\n self.data_Lv = data_Lv\n self.__len = len(self.data)\n\n def _genA(self, mol, connected=True, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n A = np.zeros(shape=(max_length, max_length), dtype=np.int32)\n\n begin, end = [b.GetBeginAtomIdx() for b in mol.GetBonds()], [b.GetEndAtomIdx() for b in mol.GetBonds()]\n bond_type = [self.bond_encoder_m[b.GetBondType()] for b in mol.GetBonds()]\n\n A[begin, end] = bond_type\n A[end, begin] = bond_type\n\n degree = np.sum(A[:mol.GetNumAtoms(), :mol.GetNumAtoms()], axis=-1)\n\n return A if connected and (degree > 0).all() else None\n\n def _genX(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n return np.array([self.atom_encoder_m[atom.GetAtomicNum()] for 
atom in mol.GetAtoms()] + [0] * (\n max_length - mol.GetNumAtoms()), dtype=np.int32)\n\n def _genS(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else len(Chem.MolToSmiles(mol))\n\n return np.array([self.smiles_encoder_m[c] for c in Chem.MolToSmiles(mol)] + [self.smiles_encoder_m['E']] * (\n max_length - len(Chem.MolToSmiles(mol))), dtype=np.int32)\n\n def _genF(self, mol, max_length=None):\n\n max_length = max_length if max_length is not None else mol.GetNumAtoms()\n\n features = np.array([[*[a.GetDegree() == i for i in range(5)],\n *[a.GetExplicitValence() == i for i in range(9)],\n *[int(a.GetHybridization()) == i for i in range(1, 7)],\n *[a.GetImplicitValence() == i for i in range(9)],\n a.GetIsAromatic(),\n a.GetNoImplicit(),\n *[a.GetNumExplicitHs() == i for i in range(5)],\n *[a.GetNumImplicitHs() == i for i in range(5)],\n *[a.GetNumRadicalElectrons() == i for i in range(5)],\n a.IsInRing(),\n *[a.IsInRingSize(i) for i in range(2, 9)]] for a in mol.GetAtoms()], dtype=np.int32)\n\n return np.vstack((features, np.zeros((max_length - features.shape[0], features.shape[1]))))\n\n def matrices2mol(self, node_labels, edge_labels, strict=False):\n mol = Chem.RWMol()\n\n for node_label in node_labels:\n mol.AddAtom(Chem.Atom(self.atom_decoder_m[node_label]))\n\n for start, end in zip(*np.nonzero(edge_labels)):\n if start > end:\n mol.AddBond(int(start), int(end), self.bond_decoder_m[edge_labels[start, end]])\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def seq2mol(self, seq, strict=False):\n mol = Chem.MolFromSmiles(''.join([self.smiles_decoder_m[e] for e in seq if e != 0]))\n\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n\n return mol\n\n def _generate_train_validation_test(self, validation, test):\n\n self.log('Creating train, validation and test sets..')\n\n validation = int(validation * len(self))\n test = int(test * len(self))\n train = len(self) - validation - test\n\n self.all_idx = np.random.permutation(len(self))\n self.train_idx = self.all_idx[0:train]\n self.validation_idx = self.all_idx[train:train + validation]\n self.test_idx = self.all_idx[train + validation:]\n\n self.train_counter = 0\n self.validation_counter = 0\n self.test_counter = 0\n\n self.train_count = train\n self.validation_count = validation\n self.test_count = test\n\n self.log('Created train ({} items), validation ({} items) and test ({} items) sets!'.format(\n train, validation, test))\n\n def _next_batch(self, counter, count, idx, batch_size):\n if batch_size is not None:\n if counter + batch_size >= count:\n counter = 0\n np.random.shuffle(idx)\n\n output = [obj[idx[counter:counter + batch_size]]\n for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n counter += batch_size\n else:\n output = [obj[idx] for obj in (self.data, self.smiles, self.data_S, self.data_A, self.data_X,\n self.data_D, self.data_F, self.data_Le, self.data_Lv)]\n\n return [counter] + output\n\n def next_train_batch(self, batch_size=None):\n out = self._next_batch(counter=self.train_counter, count=self.train_count,\n idx=self.train_idx, batch_size=batch_size)\n self.train_counter = out[0]\n\n return out[1:]\n\n def next_validation_batch(self, batch_size=None):\n out = self._next_batch(counter=self.validation_counter, count=self.validation_count,\n idx=self.validation_idx, batch_size=batch_size)\n self.validation_counter = out[0]\n\n return out[1:]\n\n def 
next_test_batch(self, batch_size=None):\n out = self._next_batch(counter=self.test_counter, count=self.test_count,\n idx=self.test_idx, batch_size=batch_size)\n self.test_counter = out[0]\n\n return out[1:]\n\n @staticmethod\n def log(msg='', date=True):\n print(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + ' ' + str(msg) if date else str(msg))\n\n def __len__(self):\n return self.__len\n\n\nif __name__ == '__main__':\n data = SparseMolecularDataset()\n data.generate('data/gdb9.sdf', filters=lambda x: x.GetNumAtoms() <= 9)\n data.save('data/gdb9_9nodes.sparsedataset')\n\n # data = SparseMolecularDataset()\n # data.generate('data/qm9_5k.smi', validation=0.00021, test=0.00021) # , filters=lambda x: x.GetNumAtoms() <= 9)\n # data.save('data/qm9_5k.sparsedataset')\n" ]
[ [ "numpy.nonzero", "numpy.stack", "numpy.random.shuffle", "numpy.linalg.eigh", "numpy.count_nonzero", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YufengChenK/scVI-1
[ "c05237c384c59f1fd783ee1f45e75d108bcabf4e" ]
[ "scvi/dataset/csv.py" ]
[ "from .dataset import GeneExpressionDataset\nimport pandas as pd\nimport numpy as np\nimport os\n\n\nclass CsvDataset(GeneExpressionDataset):\n r\"\"\" Loads a `.csv` file.\n\n Args:\n :filename: Name of the `.csv` file.\n :save_path: Save path of the dataset. Default: ``'data/'``.\n :url: Url of the remote dataset. Default: ``None``.\n :new_n_genes: Number of subsampled genes. Default: ``600``.\n :subset_genes: List of genes for subsampling. Default: ``None``.\n :compression: For on-the-fly decompression of on-disk data. If ‘infer’ and filepath_or_buffer\n is path-like, then detect compression from the following extensions: ‘.gz’, ‘.bz2’, ‘.zip’, or ‘.xz’\n (otherwise no decompression). If using ‘zip’, the ZIP file must contain only one data file to be read in.\n Default: ``None``.\n :batch_ids_file: Name of the `.csv` file with batch indices.\n File contains two columns. The first holds gene names and second\n holds batch indices - type int. The first row of the file is header.\n\n Examples:\n >>> # Loading a remote dataset\n >>> remote_url = \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE100866&format=file&file=\" \\\n ... \"GSE100866%5FCBMC%5F8K%5F13AB%5F10X%2DRNA%5Fumi%2Ecsv%2Egz\")\n >>> remote_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\", save_path='data/',\n ... compression='gzip', url=remote_url)\n >>> # Loading a local dataset\n >>> local_csv_dataset = CsvDataset(\"GSE100866_CBMC_8K_13AB_10X-RNA_umi.csv.gz\",\n ... save_path='data/', compression='gzip')\n\n \"\"\"\n\n def __init__(self, filename, save_path='data/', url=None, new_n_genes=600, subset_genes=None,\n compression=None, sep=',', gene_by_cell=True, labels_file=None,\n batch_ids_file=None):\n self.download_name = filename # The given csv file is\n self.save_path = save_path\n self.url = url\n self.compression = compression\n self.sep = sep\n self.gene_by_cell = gene_by_cell # Whether the original dataset is genes by cells\n self.labels_file = labels_file\n self.batch_ids_file = batch_ids_file\n\n data, gene_names, labels, cell_types, batch_ids = self.download_and_preprocess()\n\n super().__init__(\n *GeneExpressionDataset.get_attributes_from_matrix(\n data, labels=labels,\n batch_indices=batch_ids if batch_ids is not None else 0),\n gene_names=gene_names, cell_types=cell_types)\n\n self.subsample_genes(new_n_genes, subset_genes)\n\n def preprocess(self):\n print(\"Preprocessing dataset\")\n\n if self.gene_by_cell:\n data = pd.read_csv(os.path.join(self.save_path, self.download_name),\n sep=self.sep, index_col=0, compression=self.compression).T\n else:\n data = pd.read_csv(os.path.join(self.save_path, self.download_name),\n sep=self.sep, index_col=0, compression=self.compression)\n\n gene_names = np.array(data.columns, dtype=str)\n labels, cell_types, batch_ids = None, None, None\n if self.labels_file is not None:\n labels = pd.read_csv(os.path.join(self.save_path, self.labels_file), header=0, index_col=0)\n labels = labels.values\n cell_types = np.unique(labels)\n\n if self.batch_ids_file is not None:\n batch_ids = pd.read_csv(\n os.path.join(\n self.save_path, self.batch_ids_file), header=0, index_col=0)\n batch_ids = batch_ids.values\n\n data = data.values\n print(\"Finished preprocessing dataset\")\n return data, gene_names, labels, cell_types, batch_ids\n\n\nclass BreastCancerDataset(CsvDataset):\n def __init__(self, save_path='data/'):\n super().__init__(\"Layer2_BC_count_matrix-1.tsv\", save_path=save_path,\n url=\"http://www.spatialtranscriptomicsresearch.org/wp-content/\"\n 
\"uploads/2016/07/Layer2_BC_count_matrix-1.tsv\",\n sep='\\t', gene_by_cell=False)\n\n\nclass MouseOBDataset(CsvDataset):\n def __init__(self, save_path='data/'):\n super().__init__(\"Rep11_MOB_count_matrix-1.tsv\", save_path=save_path,\n url=\"http://www.spatialtranscriptomicsresearch.org/wp-content/uploads/\"\n \"2016/07/Rep11_MOB_count_matrix-1.tsv\",\n sep='\\t', gene_by_cell=False)\n" ]
[ [ "numpy.array", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Holmeswww/Text_Infilling
[ "f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3", "f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3", "f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3", "f63cd24bee5c62d7dedd8fb35c4e52aee20c39f3" ]
[ "texar/modules/policies/policy_nets_test.py", "texar/data/data_decoders.py", "text_infilling/self_attn.py", "texar/losses/adv_losses.py" ]
[ "#\n\"\"\"\nTests policy nets.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\n\nfrom texar.modules.policies.policy_nets import CategoricalPolicyNet\n\nclass CategoricalPolicyNetTest(tf.test.TestCase):\n \"\"\"Tests :class:`texar.modules.CategoricalPolicyNet`.\n \"\"\"\n\n def test_categorical_policy(self):\n \"\"\"Tests logics.\n \"\"\"\n policy = CategoricalPolicyNet()\n\n inputs = tf.random_uniform(shape=[64, 4])\n outputs = policy(inputs=inputs)\n self.assertEqual(outputs['action'].shape, outputs['log_prob'].shape)\n self.assertIsInstance(\n outputs['distribution'], tf.distributions.Categorical)\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# -*- coding: utf-8 -*-\n#\n\"\"\"\nHelper functions and classes for decoding text data which are used after\nreading raw text data.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.python.slim.data import data_decoder\n\nfrom texar.data.vocabulary import SpecialTokens\n\n# pylint: disable=too-many-instance-attributes, too-many-arguments,\n# pylint: disable=no-member, invalid-name\n\n__all__ = [\n \"ScalarDataDecoder\",\n \"TextDataDecoder\",\n \"VarUttTextDataDecoder\"\n]\n\ndef _append_token(token):\n return token is not None and token != \"\"\n\nclass ScalarDataDecoder(data_decoder.DataDecoder):\n \"\"\"A data decoder that decodes a scalar, e.g., int label or float number.\n\n The only operation is to cast the data into a specified data type.\n\n Args:\n dtype: A :tf_main:`tf DType <DType>` that data is cast into. Can be\n `tf.int32` or `tf.float32`.\n data_name (str): Name of the decoded data.\n \"\"\"\n\n def __init__(self, dtype=tf.int32, data_name=\"data\"):\n self._dtype = dtype\n self._data_name = data_name\n if self._data_name is None:\n self._data_name = \"data\"\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items):\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The scalar data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item.\n \"\"\"\n data = tf.reshape(data, shape=[])\n if data.dtype is tf.string:\n decoded_data = tf.string_to_number(data, out_type=self._dtype)\n else:\n decoded_data = tf.cast(data, self._dtype),\n outputs = {\n self._data_name: decoded_data\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [self._data_name]\n\n @property\n def data_tensor_name(self):\n \"\"\"The name of the data tensor.\n \"\"\"\n return self._data_name\n\nclass TextDataDecoder(data_decoder.DataDecoder):\n \"\"\"A text data decoder that decodes raw text data.\n\n Operations include splitting on word or character level, truncation,\n inserting special tokens, mapping text units to indexes, etc.\n\n Args:\n split_level (str): The name of split level on which text sequence is\n split. 
Either \"word\" or \"char\".\n delimiter (str): The delimiter character used when splitting on word\n level.\n bos_token (str, optional): Special token added to the beginning of\n sequences. If it is `None` (default) or an empty string, no\n BOS token is added.\n eos_token (str, optional): Special tokan added to the end of\n sequences. If it is `None` (default) or an empty string, no EOS\n token is added.\n max_seq_length (int, optional): Maximum length of output sequences.\n Tokens exceeding the maximum length will be truncated. The length\n does not include any added bos_token and eos_token. If not\n given, no truncation is performed.\n token_to_id_map (optional): A\n :class:`~tensorflow.contrib.lookup.HashTable` instance that maps\n token strings to integer indexes. If not given, the decoder will\n not decode text into indexes. :attr:`bos_token` and\n :attr:`eos_token` (if given) should have entries in the\n :attr:`token_to_id_map` (if given).\n text_tensor_name (str): Name of the text tensor results. Used as a\n key to retrieve the text tensor.\n length_tensor_name (str): Name of the text length tensor results.\n text_id_tensor_name (str): Name of the text index tensor results.\n \"\"\"\n\n def __init__(self,\n split_level=\"word\",\n delimiter=\" \",\n bos_token=None,\n eos_token=None,\n max_seq_length=None,\n token_to_id_map=None,\n text_tensor_name=\"text\",\n length_tensor_name=\"length\",\n text_id_tensor_name=\"text_ids\"):\n self._split_level = split_level\n self._delimiter = delimiter\n self._bos_token = bos_token\n self._eos_token = eos_token\n self._max_seq_length = max_seq_length\n self._token_to_id_map = token_to_id_map\n self._text_tensor_name = text_tensor_name\n self._text_id_tensor_name = text_id_tensor_name\n self._length_tensor_name = length_tensor_name\n self._added_length = 0\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items):\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The text data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item. 
If\n `token_to_id_map` is not given when constructing the decoder,\n returns `None` for the token index item.\n \"\"\"\n # Split\n if self._split_level == \"word\":\n tokens = tf.string_split([data], delimiter=self._delimiter).values\n elif self._split_level == \"char\":\n raise NotImplementedError\n else:\n raise ValueError(\"Unknown split level: %s\" % self._split_level)\n\n # Truncate\n if self._max_seq_length is not None:\n tokens = tokens[:self._max_seq_length]\n\n # Add BOS/EOS tokens\n if _append_token(self._bos_token):\n tokens = tf.concat([[self._bos_token], tokens], axis=0)\n self._added_length += 1\n if _append_token(self._eos_token):\n tokens = tf.concat([tokens, [self._eos_token]], axis=0)\n self._added_length += 1\n\n # Map to index\n token_ids = None\n if self._token_to_id_map is not None:\n token_ids = self._token_to_id_map.lookup(tokens)\n\n outputs = {\n self._text_tensor_name: tokens,\n self._length_tensor_name: tf.size(tokens),\n self._text_id_tensor_name: token_ids\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [self._text_tensor_name,\n self._length_tensor_name,\n self._text_id_tensor_name]\n\n @property\n def text_tensor_name(self):\n \"\"\"The name of text tensor.\n \"\"\"\n return self._text_tensor_name\n\n @text_tensor_name.setter\n def text_tensor_name(self, name):\n self._text_tensor_name = name\n\n @property\n def length_tensor_name(self):\n \"\"\"The name of length tensor.\n \"\"\"\n return self._length_tensor_name\n\n @length_tensor_name.setter\n def length_tensor_name(self, name):\n self._length_tensor_name = name\n\n @property\n def text_id_tensor_name(self):\n \"\"\"The name of text index tensor.\n \"\"\"\n return self._text_id_tensor_name\n\n @text_id_tensor_name.setter\n def text_id_tensor_name(self, name):\n self._text_id_tensor_name = name\n\n @property\n def added_length(self):\n \"\"\"The added text length due to appended bos and eos tokens.\n \"\"\"\n return self._added_length\n\nclass VarUttTextDataDecoder(data_decoder.DataDecoder):\n \"\"\"A text data decoder that decodes raw text data. Each data is considered\n to be multiple sentences concatenated by a delimiter.\n\n Operations include splitting on word or character level, truncation,\n inserting special tokens, mapping text units to indexes, etc.\n\n Args:\n split_level (str): The name of split level on which text sequence is\n split. Either \"word\" or \"char\".\n delimiter (str): The delimiter character used when splitting on word\n level.\n bos_token (str, optional): Special token added to the beginning of\n sequences. If it is `None` (default) or an empty string, no\n BOS token is added.\n eos_token (str, optional): Special tokan added to the end of\n sequences. If it is `None` (default) or an empty string, no EOS\n token is added.\n max_seq_length (int): Maximum length of each sequence.\n Tokens exceed the maximum length will be truncated. Additional\n padding will be done to ensure output sequence all reach this\n number. The length does not include any added bos_token and eos_\n token.\n max_utterance_cnt (int): Maximum number of sequences.\n Additional empty sentences will be added to\n ensure the respective dimension of the output tensor has size\n :attr:`max_utterance_cnt`. 
The output item named by\n :meth:`utterance_cnt_tensor_name` contains the actual number of\n utterance in the data.\n token_to_id_map (optional): A\n :class:`~tensorflow.contrib.lookup.HashTable` instance that maps\n token strings to integer indexes. If not given, the decoder will\n not decode text into indexes. :attr:`bos_token` and\n :attr:`eos_token` (if given) should have entries in the\n :attr:`token_to_id_map` (if given).\n text_tensor_name (str): Name of the text tensor results. Used as a\n key to retrieve the text tensor.\n length_tensor_name (str): Name of the text length tensor results.\n text_id_tensor_name (str): Name of the text index tensor results.\n \"\"\"\n\n def __init__(self,\n split_level=\"word\",\n delimiter=\" \",\n sentence_delimiter=\"|||\",\n bos_token=None,\n eos_token=None,\n max_seq_length=None,\n max_utterance_cnt=None,\n token_to_id_map=None,\n text_tensor_name=\"text\",\n length_tensor_name=\"length\",\n text_id_tensor_name=\"text_ids\",\n utterance_cnt_tensor_name=\"utterance_cnt\"):\n self._split_level = split_level\n self._delimiter = delimiter\n self._bos_token = bos_token\n self._eos_token = eos_token\n self._max_seq_length = max_seq_length\n self._token_to_id_map = token_to_id_map\n self._text_tensor_name = text_tensor_name\n self._text_id_tensor_name = text_id_tensor_name\n self._length_tensor_name = length_tensor_name\n self._utterance_cnt_tensor_name = utterance_cnt_tensor_name\n self._sentence_delimiter = sentence_delimiter\n self._max_utterance_cnt = max_utterance_cnt\n self._added_length = 0\n\n def __call__(self, data):\n outputs = self.decode(data, self.list_items())\n return dict(zip(self.list_items(), outputs))\n\n def decode(self, data, items): # pylint: disable=too-many-locals\n \"\"\"Decodes the data to return the tensors specified by the list of\n items.\n\n Args:\n data: The text data to decode.\n items: A list of strings, each of which is the name of the resulting\n tensors to retrieve.\n\n Returns:\n A list of tensors, each of which corresponds to each item. 
If\n `token_to_id_map` is not given when constructing the decoder,\n returns `None` for the token index item.\n \"\"\"\n\n sentences = tf.string_split([data],\n delimiter=self._sentence_delimiter).values\n\n # Truncate utterances\n if self._max_utterance_cnt:\n sentences = sentences[:self._max_utterance_cnt]\n utterance_cnt = tf.shape(sentences)[0]\n\n # Get (max) sentence length\n def _get_sent_length(s):\n raw_length = tf.size(\n tf.string_split([s], delimiter=self._delimiter).values)\n if self._max_seq_length:\n return tf.minimum(raw_length, self._max_seq_length)\n else:\n return raw_length\n\n raw_sent_length = tf.map_fn(\n _get_sent_length, sentences, dtype=tf.int32)\n sent_length = self._max_seq_length\n if not sent_length:\n sent_length = tf.reduce_max(raw_sent_length)\n if _append_token(self._eos_token):\n raw_sent_length += 1\n sent_length += 1\n self._added_length += 1\n if _append_token(self._bos_token):\n raw_sent_length += 1\n sent_length += 1\n self._added_length += 1\n\n def _trunc_and_pad(s, pad_token, max_length):\n if self._max_seq_length:\n s = s[:self._max_seq_length]\n if _append_token(self._bos_token):\n s = np.append([self._bos_token], s)\n if _append_token(self._eos_token):\n s = np.append(s, [self._eos_token])\n s = np.append(s, [pad_token]*(max_length-s.size))\n return s\n\n # Split each sentence to tokens, and pad them to a same length.\n # This is necessary to treat all sentences as a single tensor.\n split_sentences = tf.map_fn(\n lambda s: tf.py_func(\n _trunc_and_pad,\n [\n tf.string_split([s], delimiter=self._delimiter).values,\n SpecialTokens.PAD,\n sent_length\n ],\n tf.string),\n sentences, dtype=tf.string\n )\n\n split_sentences = tf.reshape(split_sentences,\n [utterance_cnt, sent_length])\n\n # Map to index\n token_ids = None\n if self._token_to_id_map is not None:\n token_ids = self._token_to_id_map.lookup(split_sentences)\n\n outputs = {\n self._text_tensor_name: split_sentences,\n self._length_tensor_name: raw_sent_length,\n self._utterance_cnt_tensor_name: tf.shape(sentences)[0],\n self._text_id_tensor_name: token_ids\n }\n return [outputs[item] for item in items]\n\n def list_items(self):\n \"\"\"Returns the list of item names that the decoder can produce.\n\n Returns:\n A list of strings can be passed to :meth:`decode()`.\n \"\"\"\n return [\n self._text_tensor_name,\n self._length_tensor_name,\n self._text_id_tensor_name,\n self._utterance_cnt_tensor_name\n ]\n\n @property\n def text_tensor_name(self):\n \"\"\"The name of text tensor.\n \"\"\"\n return self._text_tensor_name\n\n @text_tensor_name.setter\n def text_tensor_name(self, name):\n self._text_tensor_name = name\n\n @property\n def utterance_cnt_tensor_name(self):\n \"\"\"The name of the utterance count tensor.\n \"\"\"\n return self._utterance_cnt_tensor_name\n\n @property\n def length_tensor_name(self):\n \"\"\"The name of length tensor.\n \"\"\"\n return self._length_tensor_name\n\n @length_tensor_name.setter\n def length_tensor_name(self, name):\n self._length_tensor_name = name\n\n @property\n def text_id_tensor_name(self):\n \"\"\"The name of text index tensor.\n \"\"\"\n return self._text_id_tensor_name\n\n @text_id_tensor_name.setter\n def text_id_tensor_name(self, name):\n self._text_id_tensor_name = name\n\n @property\n def added_length(self):\n \"\"\"The added text length due to appended bos and eos tokens.\n \"\"\"\n return self._added_length\n", "# Copyright 2018 The Texar Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=invalid-name, no-member, too-many-locals\n\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # ERROR\nimport sys\nimport codecs\nimport numpy as np\nimport tensorflow as tf\nimport texar as tx\nfrom matplotlib import pyplot as plt\n\nplt.switch_backend('agg')\n\nimport self_attn_hyperparams\nimport bleu_tool\n\n\ndef _main(_):\n hparams = self_attn_hyperparams.load_hyperparams()\n train_dataset_hparams, valid_dataset_hparams, test_dataset_hparams, \\\n decoder_hparams, opt_hparams, opt_vars, loss_hparams, args = \\\n hparams['train_dataset_hparams'], hparams['eval_dataset_hparams'], \\\n hparams['test_dataset_hparams'], hparams['decoder_hparams'], \\\n hparams['opt_hparams'], hparams['opt_vars'], \\\n hparams['loss_hparams'], hparams['args']\n\n # Data\n train_data = tx.data.MonoTextData(train_dataset_hparams)\n valid_data = tx.data.MonoTextData(valid_dataset_hparams)\n test_data = tx.data.MonoTextData(test_dataset_hparams)\n iterator = tx.data.TrainTestDataIterator(train=train_data,\n val=valid_data,\n test=test_data)\n data_batch = iterator.get_next()\n mask_id = train_data.vocab.token_to_id_map_py['<m>']\n boa_id = train_data.vocab.token_to_id_map_py['<BOA>']\n eoa_id = train_data.vocab.token_to_id_map_py['<EOA>']\n eos_id = train_data.vocab.token_to_id_map_py['<EOS>']\n pad_id = train_data.vocab.token_to_id_map_py['<PAD>']\n template_pack, answer_packs = \\\n tx.utils.prepare_template(data_batch, args, mask_id, boa_id, eoa_id, pad_id)\n\n # Model architecture\n embedder = tx.modules.WordEmbedder(vocab_size=train_data.vocab.size,\n hparams=args.word_embedding_hparams)\n decoder = \\\n tx.modules.TemplateTransformerDecoder(embedding=embedder._embedding,\n hparams=decoder_hparams)\n\n cetp_loss = None\n cur_template_pack = template_pack\n for hole in answer_packs:\n logits, preds = decoder(decoder_input_pack=hole,\n template_input_pack=cur_template_pack,\n encoder_decoder_attention_bias=None,\n args=args)\n cur_loss = tx.utils.smoothing_cross_entropy(\n logits,\n hole['text_ids'][:, 1:],\n train_data.vocab.size,\n loss_hparams['label_confidence'])\n cetp_loss = cur_loss if cetp_loss is None \\\n else tf.concat([cetp_loss, cur_loss], -1)\n cur_template_pack = tx.utils.update_template_pack(cur_template_pack,\n hole['text_ids'][:, 1:],\n mask_id, eoa_id, pad_id)\n cetp_loss = tf.reduce_mean(cetp_loss)\n\n global_step = tf.Variable(0, trainable=False)\n if args.learning_rate_strategy == 'static':\n learning_rate = tf.placeholder(dtype=tf.float32, shape=(), name='learning_rate')\n elif args.learning_rate_strategy == 'dynamic':\n fstep = tf.to_float(global_step)\n learning_rate = opt_hparams['lr_constant'] \\\n * args.hidden_dim ** -0.5 \\\n * tf.minimum(fstep ** -0.5, fstep * opt_hparams['warmup_steps'] ** -1.5)\n else:\n raise ValueError('Unknown learning_rate_strategy: %s, expecting one 
of '\n '[\\'static\\', \\'dynamic\\']' % args.learning_rate_strategy)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,\n beta1=opt_hparams['Adam_beta1'],\n beta2=opt_hparams['Adam_beta2'],\n epsilon=opt_hparams['Adam_epsilon'])\n train_op = optimizer.minimize(cetp_loss, global_step)\n\n offsets = tx.utils.generate_prediction_offsets(data_batch['text_ids'],\n args.max_decode_len + 1)\n predictions = []\n cur_test_pack = template_pack\n for idx, hole in enumerate(answer_packs):\n segment_ids = \\\n tx.utils.generate_prediction_segment_ids(data_batch['text_ids'],\n 1, # segment_id will always be 1\n args.max_decode_len + 1)\n preds = decoder.dynamic_decode(\n template_input_pack=cur_test_pack,\n encoder_decoder_attention_bias=None,\n segment_ids=segment_ids,\n offsets=offsets,\n bos_id=boa_id,\n eos_id=eoa_id)\n predictions.append(preds['sampled_ids'][:, 0])\n cur_test_pack = tx.utils.update_template_pack(cur_test_pack,\n preds['sampled_ids'][:, 0],\n mask_id, eoa_id, pad_id)\n\n def _train_epochs(session, cur_epoch, mode='train'):\n iterator.switch_to_train_data(session)\n loss_lists, ppl_lists = [], []\n cnt = 0\n while True:\n try:\n fetches = {\n 'template': template_pack,\n 'holes': answer_packs,\n 'step': global_step,\n 'lr': learning_rate,\n 'loss': cetp_loss\n }\n if mode == 'train':\n fetches['train_op'] = train_op\n feed = {\n tx.context.global_mode(): tf.estimator.ModeKeys.TRAIN if mode == 'train'\n else tf.estimator.ModeKeys.EVAL\n }\n if args.learning_rate_strategy == 'static':\n feed[learning_rate] = opt_vars['learning_rate']\n rtns = session.run(fetches, feed_dict=feed)\n step, template_, holes_, loss = rtns['step'], \\\n rtns['template'], rtns['holes'], rtns['loss']\n ppl = np.exp(loss)\n if step % 200 == 1 and mode == 'train':\n rst = 'step:%s source:%s loss:%f ppl:%f lr:%f' % \\\n (step, template_['text_ids'].shape, loss, ppl, rtns['lr'])\n print(rst)\n loss_lists.append(loss)\n ppl_lists.append(ppl)\n cnt += 1\n if mode is not 'train' and cnt >= 50:\n break\n except tf.errors.OutOfRangeError:\n if args.learning_rate_strategy == 'static':\n avg_loss = np.average(loss_list)\n if avg_loss < opt_vars['best_train_loss']:\n opt_vars['best_train_loss'] = avg_loss\n opt_vars['epochs_not_improved'] = 0\n else:\n opt_vars['epochs_not_improved'] += 1\n if opt_vars['epochs_not_improved'] >= 8 and opt_vars['decay_time'] <= 3:\n opt_vars['learning_rate'] *= opt_vars['lr_decay_rate']\n print(\"[LR DECAY]: lr decay to %f at epoch %d\" %\n (opt_vars['learning_rate'], cur_epoch))\n opt_vars['decay_time'] += 1\n break\n return loss_lists, ppl_lists\n\n def _test_epoch(cur_sess, cur_epoch, mode='test'):\n def _id2word_map(id_arrays):\n return [' '.join([train_data.vocab._id_to_token_map_py[i]\n for i in sent]) for sent in id_arrays]\n\n if mode == 'test':\n iterator.switch_to_test_data(cur_sess)\n elif mode == 'train':\n iterator.switch_to_train_data(cur_sess)\n else:\n iterator.switch_to_val_data(cur_sess)\n templates_list, targets_list, hypothesis_list = [], [], []\n cnt = 0\n loss_lists, ppl_lists = [], []\n while True:\n try:\n fetches = {\n 'data_batch': data_batch,\n 'predictions': predictions,\n 'template': template_pack,\n 'step': global_step,\n 'loss': cetp_loss\n }\n feed = {tx.context.global_mode(): tf.estimator.ModeKeys.EVAL}\n rtns = cur_sess.run(fetches, feed_dict=feed)\n real_templates_, templates_, targets_, predictions_ = \\\n rtns['template']['templates'], rtns['template']['text_ids'], \\\n rtns['data_batch']['text_ids'], rtns['predictions']\n loss = 
rtns['loss']\n ppl = np.exp(loss)\n loss_lists.append(loss)\n ppl_lists.append(ppl)\n\n filled_templates = \\\n tx.utils.fill_template(template_pack=rtns['template'],\n predictions=rtns['predictions'],\n eoa_id=eoa_id, pad_id=pad_id, eos_id=eos_id)\n\n templates, targets, generateds = _id2word_map(real_templates_.tolist()), \\\n _id2word_map(targets_), \\\n _id2word_map(filled_templates)\n\n for template, target, generated in zip(templates, targets, generateds):\n template = template.split('<EOS>')[0].split('<PAD>')[0].strip().split()\n target = target.split('<EOS>')[0].split('<PAD>')[0].strip().split()\n got = generated.split('<EOS>')[0].split('<PAD>')[0].strip().split()\n templates_list.append(template)\n targets_list.append(target)\n hypothesis_list.append(got)\n\n cnt += 1\n if mode is not 'test' and cnt >= 60:\n break\n except tf.errors.OutOfRangeError:\n break\n\n avg_loss, avg_ppl = np.mean(loss_lists), np.mean(ppl_lists)\n outputs_tmp_filename = args.log_dir + 'epoch{}.beam{}.outputs.tmp'. \\\n format(cur_epoch, args.beam_width)\n template_tmp_filename = args.log_dir + 'epoch{}.beam{}.templates.tmp'. \\\n format(cur_epoch, args.beam_width)\n refer_tmp_filename = os.path.join(args.log_dir, 'eval_reference.tmp')\n with codecs.open(outputs_tmp_filename, 'w+', 'utf-8') as tmpfile, \\\n codecs.open(template_tmp_filename, 'w+', 'utf-8') as tmptpltfile, \\\n codecs.open(refer_tmp_filename, 'w+', 'utf-8') as tmpreffile:\n for hyp, tplt, tgt in zip(hypothesis_list, templates_list, targets_list):\n tmpfile.write(' '.join(hyp) + '\\n')\n tmptpltfile.write(' '.join(tplt) + '\\n')\n tmpreffile.write(' '.join(tgt) + '\\n')\n eval_bleu = float(100 * bleu_tool.bleu_wrapper(\n refer_tmp_filename, outputs_tmp_filename, case_sensitive=True))\n template_bleu = float(100 * bleu_tool.bleu_wrapper(\n refer_tmp_filename, template_tmp_filename, case_sensitive=True))\n print('epoch:{} {}_bleu:{} template_bleu:{} {}_loss:{} {}_ppl:{} '.\n format(cur_epoch, mode, eval_bleu, template_bleu, mode, avg_loss, mode, avg_ppl))\n os.remove(outputs_tmp_filename)\n os.remove(template_tmp_filename)\n os.remove(refer_tmp_filename)\n if args.save_eval_output:\n result_filename = \\\n args.log_dir + 'epoch{}.beam{}.{}.results.bleu{:.3f}' \\\n .format(cur_epoch, args.beam_width, mode, eval_bleu)\n with codecs.open(result_filename, 'w+', 'utf-8') as resultfile:\n for tmplt, tgt, hyp in zip(templates_list, targets_list, hypothesis_list):\n resultfile.write(\"- template: \" + ' '.join(tmplt) + '\\n')\n resultfile.write(\"- expected: \" + ' '.join(tgt) + '\\n')\n resultfile.write('- got: ' + ' '.join(hyp) + '\\n\\n')\n return {\n 'eval': eval_bleu,\n 'template': template_bleu\n }, avg_ppl\n\n def _draw_train_loss(epoch, loss_list, mode):\n plt.figure(figsize=(14, 10))\n plt.plot(loss_list, '--', linewidth=1, label='loss trend')\n plt.ylabel('%s till epoch %s' % (mode, epoch))\n plt.xlabel('every 50 steps, present_rate=%f' % args.present_rate)\n plt.savefig(args.log_dir + '/img/%s_curve.png' % mode)\n plt.close('all')\n\n def _draw_bleu(epoch, test_bleu, tplt_bleu, train_bleu, train_tplt_bleu):\n plt.figure(figsize=(14, 10))\n legends = []\n plt.plot(test_bleu, '--', linewidth=1, label='test bleu')\n plt.plot(tplt_bleu, '--', linewidth=1, label='template bleu')\n legends.extend(['test bleu', 'template bleu'])\n plt.ylabel('bleu till epoch {}'.format(epoch))\n plt.xlabel('every epoch')\n plt.legend(legends, loc='upper left')\n plt.savefig(args.log_dir + '/img/bleu.png')\n\n plt.figure(figsize=(14, 10))\n legends = []\n 
plt.plot(train_bleu, '--', linewidth=1, label='train bleu')\n plt.plot(train_tplt_bleu, '--', linewidth=1, label='train template bleu')\n legends.extend(['train bleu', 'train template bleu'])\n plt.ylabel('bleu till epoch {}'.format(epoch))\n plt.xlabel('every epoch')\n plt.legend(legends, loc='upper left')\n plt.savefig(args.log_dir + '/img/train_bleu.png')\n plt.close('all')\n\n eval_saver = tf.train.Saver(max_to_keep=5)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n\n loss_list, ppl_list, test_ppl_list = [], [], []\n test_bleu, tplt_bleu, train_bleu, train_tplt_bleu = [], [], [], []\n if args.running_mode == 'train_and_evaluate':\n for epoch in range(args.max_train_epoch):\n # bleu on test set and train set\n if epoch % args.bleu_interval == 0 or epoch == args.max_train_epoch - 1:\n bleu_scores, test_ppl = _test_epoch(sess, epoch)\n test_bleu.append(bleu_scores['eval'])\n tplt_bleu.append(bleu_scores['template'])\n test_ppl_list.append(test_ppl)\n _draw_train_loss(epoch, test_ppl_list, mode='test_perplexity')\n\n train_bleu_scores, _ = _test_epoch(sess, epoch, mode='train')\n train_bleu.append(train_bleu_scores['eval'])\n train_tplt_bleu.append(train_bleu_scores['template'])\n _draw_bleu(epoch, test_bleu, tplt_bleu, train_bleu, train_tplt_bleu)\n eval_saver.save(sess, args.log_dir + 'my-model-latest.ckpt')\n\n # train\n losses, ppls = _train_epochs(sess, epoch)\n loss_list.extend(losses)\n ppl_list.extend(ppls)\n _draw_train_loss(epoch, loss_list, mode='train_loss')\n _draw_train_loss(epoch, ppl_list, mode='perplexity')\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n tf.app.run(main=_main)\n", "#\n\"\"\"\nAdversarial losses.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\ndef binary_adversarial_losses(real_data,\n fake_data,\n discriminator_fn,\n mode=\"max_real\"):\n \"\"\"Computes adversarial loss of the real/fake binary classification game.\n\n Args:\n real_data (Tensor or array): Real data of shape\n `[num_real_examples, ...]`.\n fake_data (Tensor or array): Fake data of shape\n `[num_fake_examples, ...]`. `num_real_examples` does not necessarily\n equal `num_fake_examples`.\n discriminator_fn: A callable takes data (e.g., :attr:`real_data` and\n :attr:`fake_data`) and returns the logits of being real. The\n signature of :attr:`discriminator_fn` must be:\n\n `logits, ... = discriminator_fn(data)`\n\n mode (str): Mode of the generator loss. 
Either `max_real` or `min_fake`.\n\n If `max_real` (default), minimizing the generator loss is to\n maximize the probability of fake data being classified as real.\n\n If `min_fake`, minimizing the generator loss is to minimize the\n probability of fake data being classified as fake.\n\n Returns:\n (scalar Tensor, scalar Tensor): (generator_loss, discriminator_loss).\n \"\"\"\n real_logits = discriminator_fn(real_data)\n if isinstance(real_logits, (list, tuple)):\n real_logits = real_logits[0]\n real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=real_logits, labels=tf.ones_like(real_logits)))\n\n fake_logits = discriminator_fn(fake_data)\n if isinstance(fake_logits, (list, tuple)):\n fake_logits = fake_logits[0]\n fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=fake_logits, labels=tf.zeros_like(fake_logits)))\n\n d_loss = real_loss + fake_loss\n\n if mode == \"min_fake\":\n g_loss = - fake_loss\n elif mode == \"max_real\":\n g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=fake_logits, labels=tf.ones_like(fake_logits)))\n else:\n raise ValueError(\"Unknown mode: %s. Only 'min_fake' and 'max_real' \"\n \"are allowed.\")\n\n return g_loss, d_loss\n" ]
[ [ "tensorflow.random_uniform", "tensorflow.test.main" ], [ "tensorflow.reduce_max", "tensorflow.concat", "tensorflow.shape", "tensorflow.reshape", "tensorflow.cast", "tensorflow.minimum", "numpy.append", "tensorflow.map_fn", "tensorflow.string_split", "tensorflow.string_to_number", "tensorflow.size" ], [ "matplotlib.pyplot.legend", "tensorflow.concat", "tensorflow.minimum", "matplotlib.pyplot.plot", "numpy.mean", "tensorflow.train.AdamOptimizer", "numpy.exp", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.to_float", "matplotlib.pyplot.close", "tensorflow.Session", "tensorflow.train.Saver", "matplotlib.pyplot.figure", "tensorflow.app.run", "matplotlib.pyplot.switch_backend", "tensorflow.placeholder", "matplotlib.pyplot.savefig", "tensorflow.global_variables_initializer", "matplotlib.pyplot.ylabel", "tensorflow.local_variables_initializer", "tensorflow.reduce_mean", "matplotlib.pyplot.xlabel", "numpy.average", "tensorflow.tables_initializer" ], [ "tensorflow.zeros_like", "tensorflow.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tramtran2/prlab_image_colorization
[ "3ec7f3ad60d6235c5bb232713f1b3ec5f06f4d67" ]
[ "sources/image_colorization/datasets/quantized_colors/utils.py" ]
[ "\"\"\"\nFunctions:\n def read_image(img_path, is_resize = True, width = 224, height = 224, interpolation = cv2.INTER_AREA)\n \n def cielab_color_space()\n def view_db_info(db_root, db_files, db_name)\n \n def compute_prior_prob(image_files, width, height, do_plot, pts_in_hull_path, prior_prob_path)\n def compute_prior_prob_v1(image_files, is_resize, width, height, do_plot, pts_in_hull_path, prior_prob_path, ab_hist_path):\n \n def compute_prior_prob_smoothed(prior_prob_path, prior_prob_smoothed_path, sigma, do_plot = True, verbose = 1)\n def compute_prior_factor(prior_prob_path, prior_prob_smoothed_path, prior_prob_factor_path, gamma = 0.5, alpha = 1, do_plot = True, verbose = 1)\n \nMain:\n def compute_prior_prob_export(db_root, db_file, db_name, column_image = \"image\", column_type = \"type\", process_types = [\"train\"], \n pts_in_hull_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\").replace(\"\\\\\", \"/\"),\n export_prior_prob_path = None, \n export_ab_hist_path = None, \n is_resize = False, width = 112, height = 112, \n do_plot = True, verbose = 1, )\n \n def main()\n def main_index_data(**input_params)\n def main_compute_prior_prob(**input_params)\n def main_compute_prior_prob_smoothed(**input_params)\n def main_cielab_color_space()\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport click, os, pandas as pd, glob, tqdm, cv2, numpy as np, sys\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pylab as plt\nfrom matplotlib.colors import LogNorm\n\nimport time\nfrom skimage import color\nfrom console_progressbar import ProgressBar\n\nimport sklearn.neighbors as nn\n\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import gaussian, convolve\n\ndef read_image(img_path, is_resize = True, width = 224, height = 224, interpolation = cv2.INTER_AREA):\n \"\"\"\n Load img with opencv and reshape\n \"\"\"\n result = {}\n \n org_img_color = cv2.imread(img_path)\n if len(org_img_color.shape)==2: # grayscale\n org_img_color = np.dstack([org_img_color, org_img_color, org_img_color])\n else:\n org_img_color = org_img_color[:, :, ::-1] # RGB convert\n # if\n org_img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n org_img_Lab = cv2.cvtColor(org_img_color, cv2.COLOR_RGB2Lab)\n \n result.update(dict(org_img_color=org_img_color, org_img_gray=org_img_gray, org_img_Lab=org_img_Lab))\n\n if is_resize == True:\n res_img_color = cv2.resize(org_img_color, (width, height), interpolation=interpolation)\n res_img_gray = cv2.resize(org_img_gray, (width, height), interpolation=interpolation)\n res_img_Lab = cv2.cvtColor(res_img_color, cv2.COLOR_RGB2Lab)\n result.update(dict(res_img_color=res_img_color, res_img_gray=res_img_gray, res_img_Lab=res_img_Lab))\n # if\n\n return result\n# read_image\n\ndef compute_prior_prob(image_files, width, height, do_plot, pts_in_hull_path, prior_prob_path):\n \"\"\"\n Compute color prior probabilities for pts in hull\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n df_data = pd.read_hdf(os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"div2k.hdf5\"), \"data\")\n list_types = [\"'train'\"]\n df_select_data = df_data.query(\"type in [\" + \",\".join(list_types) + \"]\")\n image_dir = os.path.join(dataset_dir, \"DIV2K\").replace(\"\\\\\", \"/\")\n\n image_files = image_dir + \"/\" + df_select_data[\"path\"].values\n image_files[0:3], len(image_files)\n\n info = dict(\n image_files = image_files,\n pts_in_hull_path = os.path.join(data_dir, 
\"colorization_richard_zhang\", \"pts_in_hull.npy\"),\n prior_prob_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_train_div2k.npy\"),\n width = 112,\n height = 112,\n do_plot = True\n )\n locals().update(**info)\n prior_prob = compute_prior_prob(**info)\n \"\"\"\n # Load ab image\n X_ab = []\n for image_path in tqdm.tqdm(image_files):\n result = read_image(image_path, is_resize = True, width = width, height = height)\n X_ab.append(result[\"res_img_Lab\"][:, :, 1:])\n # for\n X_ab = np.array(X_ab)\n X_ab = X_ab - 128.0\n \n # Load the gamut points location\n q_ab = np.load(pts_in_hull_path)\n\n if do_plot:\n plt.figure(figsize=(8, 8))\n plt.title(\"ab quantize\")\n gs = gridspec.GridSpec(1, 1)\n ax = plt.subplot(gs[0])\n for i in range(q_ab.shape[0]):\n ax.scatter(q_ab[:, 0], q_ab[:, 1])\n ax.annotate(str(i), (q_ab[i, 0], q_ab[i, 1]), fontsize=6)\n ax.set_xlim([-110, 110])\n ax.set_ylim([-110, 110])\n # for\n # if\n \n npts, c, h, w = X_ab.shape\n X_a_ravel = np.ravel(X_ab[:, :, :, 0])\n X_b_ravel = np.ravel(X_ab[:, :, :, 1])\n X_ab_ravel = np.vstack((X_a_ravel, X_b_ravel)).T\n \n if do_plot:\n plt.title(\"Prior Distribution in ab space\\n\", fontsize=16)\n plt.hist2d(X_ab_ravel[:, 0], X_ab_ravel[:, 1], bins=100, density=True, norm=LogNorm(), cmap=plt.cm.jet)\n plt.xlim([-120, 120])\n plt.ylim([-120, 120])\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.xlabel(\"b channel\", fontsize = 14)\n plt.ylabel(\"a channel\", fontsize = 14)\n plt.colorbar()\n plt.show()\n plt.clf()\n plt.close()\n # if\n \n # Create nearest neighbord instance with index = q_ab\n NN = 1\n nearest = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(q_ab)\n # Find index of nearest neighbor for X_ab\n dists, ind = nearest.kneighbors(X_ab_ravel)\n\n # We now count the number of occurrences of each color\n ind = np.ravel(ind)\n counts = np.bincount(ind)\n idxs = np.nonzero(counts)[0]\n prior_prob = np.zeros((q_ab.shape[0]))\n \n prior_prob[idxs] = counts[idxs] \n \n # We turn this into a color probability\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Save\n if prior_prob_path is not None:\n save_dir = os.path.dirname(prior_prob_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n pts_in_hull_name = os.path.basename(pts_in_hull_path)\n safe_copy(pts_in_hull_path, os.path.join(save_dir, pts_in_hull_name))\n np.save(prior_prob_path, prior_prob)\n # if\n\n if do_plot:\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n plt.show()\n # if\n \n return prior_prob\n pass\n# compute_prior_prob\n\ndef compute_prior_prob_v1(image_files, is_resize, width, height, do_plot, pts_in_hull_path, prior_prob_path, ab_hist_path):\n \"\"\"\n Compute color prior probabilities for pts in hull\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n df_data = pd.read_hdf(os.path.join(dataset_dir, \"DIV2K\", \"div2k.hdf5\"), \"data\")\n list_types = [\"'train'\"]\n df_select_data = df_data.query(\"type in [\" + \",\".join(list_types) + \"]\")\n image_dir = os.path.join(dataset_dir, \"DIV2K\").replace(\"\\\\\", \"/\")\n\n image_files = image_dir + \"/\" + df_select_data[\"path\"].values\n image_files[0:3], len(image_files)\n\n info = dict(\n image_files = image_files,\n pts_in_hull_path = os.path.join(module_dir, \"data\", \"pts_in_hull.npy\"),\n prior_prob_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\"),\n 
ab_hist_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"ab_hist_train_div2k.npy\"),\n \n is_resize = False,\n width = 112,\n height = 112,\n \n do_plot = True\n )\n locals().update(**info)\n prior_prob = compute_prior_prob(**info)\n \"\"\"\n # Load ab image\n ab_hist = np.zeros((256, 256), dtype = np.uint64)\n for image_path in tqdm.tqdm(image_files):\n result = read_image(image_path, is_resize = is_resize, \n width = width, height = height)\n I_ab = result[\"res_img_Lab\"][:, :, 1:] if is_resize==True else result[\"org_img_Lab\"][:, :, 1:] \n I_ab = I_ab.reshape(-1, 2).astype(np.uint)\n\n (ab_vals, ab_cnts) = np.unique(I_ab, return_counts = True, axis=0)\n ab_hist[ab_vals[:, 0], ab_vals[:, 1]] += ab_cnts.astype(np.uint64)\n # for\n \n # Load the gamut points location\n q_ab = np.load(pts_in_hull_path)\n \n if do_plot:\n plt.figure(figsize=(8, 8))\n gs = gridspec.GridSpec(1, 1)\n ax = plt.subplot(gs[0])\n for i in range(q_ab.shape[0]):\n ax.scatter(q_ab[:, 0], q_ab[:, 1])\n ax.annotate(str(i), (q_ab[i, 0], q_ab[i, 1]), fontsize=6)\n ax.set_xlim([-110, 110])\n ax.set_ylim([-110, 110])\n # for\n \n plt.title(\"Prior Distribution in ab space\\n\", fontsize=16)\n plt.imshow(ab_hist.transpose(), norm=LogNorm(), cmap=plt.cm.jet, extent = (-128, 127, -128, 127), origin = \"uper\")\n plt.xlim([-120, 120])\n plt.ylim([-120, 120])\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.xlabel(\"b channel\", fontsize = 14)\n plt.ylabel(\"a channel\", fontsize = 14)\n plt.colorbar()\n plt.show()\n plt.clf()\n plt.close()\n # if\n \n X_ab_ravel_h = np.vstack(np.nonzero(ab_hist)).T\n X_ab_ravel_h = X_ab_ravel_h - 128\n \n # Create nearest neighbord instance with index = q_ab\n NN = 1\n nearest = nn.NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(q_ab)\n # Find index of nearest neighbor for X_ab\n dists, ind = nearest.kneighbors(X_ab_ravel_h)\n\n # We now count the number of occurrences of each color\n ind = np.ravel(ind)\n \n counts = np.zeros(np.max(ind) + 1, np.uint64)\n for idx, (a,b) in enumerate(X_ab_ravel_h):\n counts[ind[idx]] = counts[ind[idx]] + ab_hist[(a + 128,b + 128)]\n pass\n # for\n \n idxs = np.nonzero(counts)[0]\n prior_prob = np.zeros((q_ab.shape[0]))\n prior_prob[idxs] = counts[idxs]\n \n # We turn this into a color probability\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Save\n if prior_prob_path is not None:\n save_dir = os.path.dirname(prior_prob_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_path, prior_prob)\n # if\n \n # Save\n if ab_hist_path is not None:\n save_dir = os.path.dirname(ab_hist_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(ab_hist_path, ab_hist)\n # if\n\n if do_plot:\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n plt.show()\n # if\n \n return prior_prob, ab_hist\n pass\n# compute_prior_prob_v1\n\ndef compute_prior_prob_smoothed(prior_prob_path, prior_prob_smoothed_path, sigma = 5, do_plot = True, verbose = 1):\n \"\"\"\n Interpolation on prior prob, next using interpolation to smoothness path, and normalize again\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n info = dict(\n prior_prob_path = os.path.join(module_dir, \"data\", \"prior_prob_train_div2k.npy\"),\n prior_prob_smoothed_path = os.path.join(module_dir, \"data\", \"prior_prob_smoothed_train_div2k.npy\"),\n sigma = 
5,\n do_plot = True,\n verbose = True,\n )\n locals().update(**info)\n prior_prob_smoothed = compute_prior_prob_smoothed(**info)\n \"\"\"\n # load prior probability\n \n if verbose==1: print(\"\\n=== Compute Prior Probability Smoothed === \")\n prior_prob = np.load(prior_prob_path)\n \n # add an epsilon to prior prob to avoid 0 vakues and possible NaN\n prior_prob += 1E-3 * np.min(prior_prob)\n # renormalize\n prior_prob = prior_prob / (1.0 * np.sum(prior_prob))\n\n # Smooth with gaussian\n f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)\n xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)\n yy = f(xx)\n window = gaussian(2000, sigma) # 2000 pts in the window, sigma=5\n smoothed = convolve(yy, window / window.sum(), mode='same')\n fout = interp1d(xx, smoothed)\n prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])\n prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)\n\n # Save\n if prior_prob_smoothed_path is not None:\n save_dir = os.path.dirname(prior_prob_smoothed_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_smoothed_path, prior_prob_smoothed)\n # if\n\n if do_plot:\n plt.figure(figsize=(20, 10))\n plt.subplot(2, 2, 1)\n plt.plot(prior_prob, label=\"prior_prob\")\n plt.plot(prior_prob_smoothed, \"g--\", label=\"prior_prob_smoothed\")\n plt.yscale(\"log\")\n plt.legend()\n \n plt.subplot(2, 2, 2)\n plt.plot(prior_prob, label=\"prior_prob\")\n plt.plot(xx, smoothed, \"r-\", label=\"smoothed\")\n plt.yscale(\"log\")\n plt.legend()\n \n plt.subplot(2, 2, 3)\n plt.hist(prior_prob, bins=100)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(2, 2, 4)\n plt.hist(prior_prob_smoothed, bins=100)\n plt.xlabel(\"Prior probability smoothed\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.show()\n # if\n \n return prior_prob_smoothed\n# compute_prior_prob_smoothed\n\ndef compute_prior_factor(prior_prob_path, prior_prob_smoothed_path, prior_prob_factor_path, gamma = 0.5, alpha = 1, do_plot = True, verbose = 1):\n \"\"\"\n Calculate prior probability factorization\n Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py\n Usage:\n info = dict(\n prior_prob_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_train_div2k.npy\"),\n prior_prob_smoothed_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_smoothed_train_div2k.npy\"),\n prior_prob_factor_path = os.path.join(data_dir, \"preprocessing\", \"DIV2K\", \"prior_prob_factor_train_div2k.npy\"),\n gamma = 0.5,\n alpha = 1,\n do_plot = True,\n verbose = 1,\n )\n locals().update(**info)\n prior_factor = compute_prior_factor(**info)\n \"\"\"\n if verbose==1: print(\"\\n=== Compute Prior Factor === \")\n prior_prob = np.load(prior_prob_path)\n prior_prob_smoothed = np.load(prior_prob_smoothed_path)\n\n u = np.ones_like(prior_prob_smoothed)\n u = u / np.sum(1.0 * u)\n\n prior_factor = (1 - gamma) * prior_prob_smoothed + gamma * u\n prior_factor = np.power(prior_factor, -alpha)\n\n # renormalize\n prior_factor = prior_factor / (np.sum(prior_factor * prior_prob_smoothed))\n\n # Save\n if prior_prob_factor_path is not None:\n save_dir = os.path.dirname(prior_prob_factor_path)\n if save_dir != \"\" and os.path.exists(save_dir) == False: os.makedirs(save_dir)\n np.save(prior_prob_factor_path, prior_factor)\n # if\n \n if do_plot:\n plt.figure(figsize=(20, 10))\n plt.subplot(1, 3, 1)\n 
plt.hist(prior_prob)\n plt.xlabel(\"Prior probability\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(1, 3, 2)\n plt.hist(prior_prob_smoothed)\n plt.xlabel(\"Prior probability smoothed\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.subplot(1, 3, 3)\n plt.hist(prior_factor)\n plt.xlabel(\"Prior probability smoothed factor\")\n plt.ylabel(\"Frequency\")\n plt.yscale(\"log\")\n \n plt.show()\n # if\n\n return prior_factor\n# def\n\ndef cielab_color_space():\n print('SkImage:')\n start = time.time()\n L = [0] * 256 ** 3\n a = [0] * 256 ** 3\n b = [0] * 256 ** 3\n i = 0\n pb = ProgressBar(total=256, prefix='SkImage converting images', suffix='', decimals=3, length=50, fill='=')\n for r in range(256):\n for g in range(256):\n for bb in range(256):\n im = np.array((bb, g, r), np.uint8).reshape(1, 1, 3)\n color.rgb2lab(im) # transform it to LAB\n L[i] = im[0, 0, 0]\n a[i] = im[0, 0, 1]\n b[i] = im[0, 0, 2]\n i += 1\n # for\n # for\n pb.print_progress_bar(r)\n # for\n \n print(\"\")\n print(min(L), '<=L<=', max(L))\n print(min(a), '<=a<=', max(a))\n print(min(b), '<=b<=', max(b))\n end = time.time()\n elapsed = end - start\n print('elapsed: {} seconds.'.format(elapsed))\n ##############################################\n \n print('OpenCV:')\n start = time.time()\n L = [0] * 256 ** 3\n a = [0] * 256 ** 3\n b = [0] * 256 ** 3\n i = 0\n pb = ProgressBar(total=256, prefix='OpenCV converting images', suffix='', decimals=3, length=50, fill='=')\n for r in range(256):\n for g in range(256):\n for bb in range(256):\n im = np.array((bb, g, r), np.uint8).reshape(1, 1, 3)\n cv2.cvtColor(im, cv2.COLOR_BGR2LAB, im) # transform it to LAB\n L[i] = im[0, 0, 0]\n a[i] = im[0, 0, 1]\n b[i] = im[0, 0, 2]\n i += 1\n # for\n # for\n pb.print_progress_bar(r)\n # for\n\n print(\"\")\n print(min(L), '<=L<=', max(L))\n print(min(a), '<=a<=', max(a))\n print(min(b), '<=b<=', max(b))\n end = time.time()\n elapsed = end - start\n print('elapsed: {} seconds.'.format(elapsed))\n# cielab_color_space\n\ndef view_db_info(db_root, db_files, db_name):\n df_data = pd.read_hdf(db_files, key = db_name)\n\n print(\"Dataset info: \")\n print(\"+ Image Path: \", db_root)\n print(\"+ Index Path: \", db_files)\n print(\"+ Columns: \", df_data.keys())\n print(\"+ Rows: \", len(df_data))\n info_types = df_data.groupby(\"type\").count().reset_index()[[\"type\", \"image\"]].values\n print(\"+ Types: \\n\", info_types)\n print()\n# view_db_info\n\ndef compute_prior_prob_export(db_root, db_file, db_name, column_image = \"image\", column_type = \"type\", process_types = [\"train\"], \n pts_in_hull_path = \"prior_prob_train_div2k.npy\",\n export_prior_prob_path = None, \n export_ab_hist_path = None, \n is_resize = False, width = 112, height = 112, \n do_plot = True, verbose = 1, ): \n print(\"\\n=== Compute Prior Probability === \")\n df_data = pd.read_hdf(db_file, key = db_name)\n select_expr = f'{column_type} in [\"%s\"]'%('\", \"'.join(list(process_types)))\n df_select_data = df_data.query(select_expr)\n image_files = db_root + \"/\" + df_select_data[column_image].values\n \n if verbose==1: \n view_db_info(db_root, db_file, db_name)\n print(f'Select_expr: {select_expr}')\n print(f'Rows after select: {len(df_select_data)}')\n print()\n print(\"Images: \", image_files[0:5], \" ... 
\")\n print()\n print(\"Caluculate prior probability\")\n # if\n \n prior_prob, ab_hist = compute_prior_prob_v1(image_files = image_files, \n pts_in_hull_path = pts_in_hull_path, \n prior_prob_path = export_prior_prob_path,\n ab_hist_path = export_ab_hist_path,\n is_resize = is_resize, \n width = width, height = height, \n do_plot = do_plot)\n \n if verbose==1:\n print()\n print(f'prior_prob: shape={prior_prob.shape}')\n print(prior_prob)\n print()\n print(f'ab_hist: shape={ab_hist.shape}')\n print(ab_hist)\n # if\n \n return prior_prob, ab_hist\n# compute_prior_prob_export" ]
[ [ "numpy.linspace", "numpy.max", "matplotlib.pylab.legend", "matplotlib.pylab.close", "numpy.ones_like", "matplotlib.pylab.yticks", "matplotlib.pylab.xlim", "numpy.unique", "numpy.arange", "numpy.save", "matplotlib.pylab.subplot", "scipy.interpolate.interp1d", "matplotlib.pylab.figure", "matplotlib.gridspec.GridSpec", "matplotlib.pylab.plot", "numpy.ravel", "numpy.load", "sklearn.neighbors.NearestNeighbors", "numpy.zeros", "pandas.read_hdf", "numpy.nonzero", "numpy.power", "numpy.min", "matplotlib.pylab.hist", "matplotlib.pylab.xticks", "matplotlib.pylab.clf", "matplotlib.pylab.colorbar", "numpy.array", "numpy.sum", "matplotlib.pylab.show", "matplotlib.colors.LogNorm", "scipy.signal.gaussian", "numpy.dstack", "matplotlib.pylab.title", "matplotlib.pylab.yscale", "numpy.bincount", "matplotlib.pylab.ylabel", "matplotlib.pylab.ylim", "matplotlib.pylab.xlabel", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "0.14", "0.15", "0.19", "0.18", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] } ]
HarshCasper/mars
[ "4c12c968414d666c7a10f497bc22de90376b1932", "4c12c968414d666c7a10f497bc22de90376b1932", "4c12c968414d666c7a10f497bc22de90376b1932", "4c12c968414d666c7a10f497bc22de90376b1932" ]
[ "mars/tensor/linalg/cholesky.py", "mars/tensor/tests/test_utils.py", "mars/learn/neighbors/tree.py", "mars/tensor/random/beta.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom ...serialize import KeyField, BoolField\nfrom ... import opcodes as OperandDef\nfrom ...utils import check_chunks_unknown_shape\nfrom ...tiles import TilesError\nfrom ..operands import TensorHasInput, TensorOperand, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOrder\nfrom ..array_utils import as_same_device, device\n\n\nclass TensorCholesky(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY\n\n _input = KeyField('input')\n _lower = BoolField('lower')\n\n def __init__(self, lower=None, dtype=None, **kw):\n super().__init__(_lower=lower, _dtype=dtype, **kw)\n\n @property\n def lower(self):\n return self._lower\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, a):\n return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)\n\n @classmethod\n def tile(cls, op):\n from ..datasource.zeros import TensorZeros\n from ..base import TensorTranspose\n from ..utils import reverse_order\n from .dot import TensorDot\n from .solve_triangular import TensorSolveTriangular\n\n tensor = op.outputs[0]\n in_tensor = op.input\n check_chunks_unknown_shape([in_tensor], TilesError)\n if in_tensor.nsplits[0] != in_tensor.nsplits[1]:\n # all chunks on diagonal should be square\n nsplits = in_tensor.nsplits[0]\n in_tensor = in_tensor.rechunk([nsplits, nsplits])._inplace_tile()\n\n lower_chunks, upper_chunks = {}, {}\n for i in range(in_tensor.chunk_shape[0]):\n for j in range(in_tensor.chunk_shape[1]):\n if i < j:\n lower_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[0][i], in_tensor.nsplits[1][j]),\n index=(i, j), order=tensor.order)\n upper_chunk = TensorZeros(dtype=tensor.dtype).new_chunk(\n None, shape=(in_tensor.nsplits[1][j], in_tensor.nsplits[0][i]),\n index=(j, i), order=tensor.order)\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n elif i == j:\n target = in_tensor.cix[i, j]\n if i > 0:\n prev_chunks = []\n for p in range(i):\n a, b = lower_chunks[i, p], upper_chunks[p, j]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n\n cholesky_fuse_op = TensorCholeskyFuse()\n lower_chunk = cholesky_fuse_op.new_chunk([target] + prev_chunks,\n shape=target.shape, index=(i, j),\n order=tensor.order)\n else:\n lower_chunk = TensorCholesky(lower=True, dtype=tensor.dtype).new_chunk(\n [target], shape=target.shape, index=(i, j), order=tensor.order)\n\n upper_chunk = TensorTranspose(dtype=lower_chunk.dtype).new_chunk(\n [lower_chunk], shape=lower_chunk.shape[::-1],\n index=lower_chunk.index[::-1], order=reverse_order(lower_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n 
upper_chunks[upper_chunk.index] = upper_chunk\n else:\n target = in_tensor.cix[j, i]\n if j > 0:\n prev_chunks = []\n for p in range(j):\n a, b = lower_chunks[j, p], upper_chunks[p, i]\n prev_chunk = TensorDot(dtype=tensor.dtype).new_chunk(\n [a, b], shape=(a.shape[0], b.shape[1]), order=tensor.order)\n prev_chunks.append(prev_chunk)\n cholesky_fuse_op = TensorCholeskyFuse(by_solve_triangular=True)\n upper_chunk = cholesky_fuse_op.new_chunk([target] + [lower_chunks[j, j]] + prev_chunks,\n shape=target.shape, index=(j, i),\n order=tensor.order)\n else:\n upper_chunk = TensorSolveTriangular(lower=True, dtype=tensor.dtype).new_chunk(\n [lower_chunks[j, j], target], shape=target.shape,\n index=(j, i), order=tensor.order)\n lower_chunk = TensorTranspose(dtype=upper_chunk.dtype).new_chunk(\n [upper_chunk], shape=upper_chunk.shape[::-1],\n index=upper_chunk.index[::-1], order=reverse_order(upper_chunk.order))\n lower_chunks[lower_chunk.index] = lower_chunk\n upper_chunks[upper_chunk.index] = upper_chunk\n\n new_op = op.copy()\n if op.lower:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(lower_chunks.values()), nsplits=in_tensor.nsplits)\n else:\n return new_op.new_tensors(op.inputs, tensor.shape, order=tensor.order,\n chunks=list(upper_chunks.values()), nsplits=in_tensor.nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n chunk = op.outputs[0]\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n if xp is np:\n try:\n import scipy.linalg\n\n ctx[chunk.key] = scipy.linalg.cholesky(a, lower=op.lower)\n return\n except ImportError: # pragma: no cover\n pass\n\n r = xp.linalg.cholesky(a)\n if not chunk.op.lower:\n r = r.T.conj()\n\n ctx[chunk.key] = r\n\n\nclass TensorCholeskyFuse(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.CHOLESKY_FUSE\n\n _by_solve_triangular = BoolField('by_solve_triangular')\n\n def __init__(self, by_solve_triangular=None, **kw):\n super().__init__(_by_solve_triangular=by_solve_triangular, **kw)\n\n @property\n def by_solve_triangular(self):\n return self._by_solve_triangular\n\n @classmethod\n def _execute_by_cholesky(cls, inputs):\n import scipy.linalg\n\n target = inputs[0]\n return scipy.linalg.cholesky((target - sum(inputs[1:])), lower=True)\n\n @classmethod\n def _execute_by_solve_striangular(cls, inputs):\n import scipy.linalg\n\n target = inputs[0]\n lower = inputs[1]\n return scipy.linalg.solve_triangular(lower, (target - sum(inputs[2:])), lower=True)\n\n @classmethod\n def execute(cls, ctx, op):\n inputs = [ctx[c.key] for c in op.inputs]\n if op.by_solve_triangular:\n ret = cls._execute_by_solve_striangular(inputs)\n else:\n ret = cls._execute_by_cholesky(inputs)\n ctx[op.outputs[0].key] = ret\n\n\ndef cholesky(a, lower=False):\n \"\"\"\n Cholesky decomposition.\n\n Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,\n where `L` is lower-triangular and .H is the conjugate transpose operator\n (which is the ordinary transpose if `a` is real-valued). `a` must be\n Hermitian (symmetric if real-valued) and positive-definite. Only `L` is\n actually returned.\n\n Parameters\n ----------\n a : (..., M, M) array_like\n Hermitian (symmetric if all elements are real), positive-definite\n input matrix.\n lower : bool\n Whether to compute the upper or lower triangular Cholesky\n factorization. 
Default is upper-triangular.\n\n Returns\n -------\n L : (..., M, M) array_like\n Upper or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError\n If the decomposition fails, for example, if `a` is not\n positive-definite.\n\n Notes\n -----\n\n Broadcasting rules apply, see the `mt.linalg` documentation for\n details.\n\n The Cholesky decomposition is often used as a fast way of solving\n\n .. math:: A \\\\mathbf{x} = \\\\mathbf{b}\n\n (when `A` is both Hermitian/symmetric and positive-definite).\n\n First, we solve for :math:`\\\\mathbf{y}` in\n\n .. math:: L \\\\mathbf{y} = \\\\mathbf{b},\n\n and then for :math:`\\\\mathbf{x}` in\n\n .. math:: L.H \\\\mathbf{x} = \\\\mathbf{y}.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> A = mt.array([[1,-2j],[2j,5]])\n >>> A.execute()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> L = mt.linalg.cholesky(A, lower=True)\n >>> L.execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> mt.dot(L, L.T.conj()).execute() # verify that L * L.H = A\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?\n >>> mt.linalg.cholesky(A, lower=True).execute()\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n\n \"\"\"\n a = astensor(a)\n\n if a.ndim != 2:\n raise LinAlgError(f'{a.ndim}-dimensional array given. '\n 'Tensor must be two-dimensional')\n if a.shape[0] != a.shape[1]:\n raise LinAlgError('Input must be square')\n\n cho = np.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))\n\n op = TensorCholesky(lower=lower, dtype=cho.dtype)\n return op(a)\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport unittest\n\nfrom mars.lib.mmh3 import hash_from_buffer as mmh3_hash_from_buffer\nfrom mars.session import new_session\nfrom mars import tensor as mt\nfrom mars.tensor.utils import hash_on_axis, normalize_axis_tuple, fetch_corner_data\n\n\nclass Test(unittest.TestCase):\n def testHashOnAxis(self):\n hash_from_buffer = lambda x: mmh3_hash_from_buffer(memoryview(x))\n\n a = np.random.rand(10)\n\n result = hash_on_axis(a, 0, 3)\n expected = np.array([mmh3_hash_from_buffer(element) % 3 for element in a])\n\n np.testing.assert_array_equal(result, expected)\n\n result = hash_on_axis(a, 0, 1)\n expected = np.array([0 for _ in a])\n\n np.testing.assert_array_equal(result, expected)\n\n a = np.random.rand(10, 5)\n\n result = hash_on_axis(a, 0, 3)\n expected = np.array([hash_from_buffer(a[i, :]) % 3 for i in range(a.shape[0])])\n\n np.testing.assert_array_equal(result, expected)\n\n result = hash_on_axis(a, 1, 3)\n expected = np.array([hash_from_buffer(a[:, i]) % 3 for i in range(a.shape[1])])\n\n np.testing.assert_array_equal(result, expected)\n\n a = np.random.rand(10, 5, 4)\n\n result = hash_on_axis(a, 2, 3)\n expected = np.array([hash_from_buffer(a[:, :, i]) % 3 for i in range(a.shape[2])])\n\n np.testing.assert_array_equal(result, expected)\n\n def 
testNormalizeAxisTuple(self):\n self.assertEqual(normalize_axis_tuple(-1, 3), (2,))\n self.assertEqual(normalize_axis_tuple([0, -2], 3), (0, 1))\n self.assertEqual(sorted(normalize_axis_tuple({0, -2}, 3)), [0, 1])\n\n with self.assertRaises(ValueError) as cm:\n normalize_axis_tuple((1, -2), 3, argname='axes')\n self.assertIn('axes', str(cm.exception))\n\n with self.assertRaises(ValueError):\n normalize_axis_tuple((1, -2), 3)\n\n def testFetchTensorCornerData(self):\n sess = new_session()\n print_options = np.get_printoptions()\n\n # make sure numpy default option\n self.assertEqual(print_options['edgeitems'], 3)\n self.assertEqual(print_options['threshold'], 1000)\n\n size = 12\n for i in (2, 4, size - 3, size, size + 3):\n arr = np.random.rand(i, i, i)\n t = mt.tensor(arr, chunk_size=size // 2)\n sess.run(t, fetch=False)\n\n corner_data = fetch_corner_data(t, session=sess)\n corner_threshold = 1000 if t.size < 1000 else corner_data.size - 1\n with np.printoptions(threshold=corner_threshold, suppress=True):\n # when we repr corner data, we need to limit threshold that\n # it's exactly less than the size\n repr_corner_data = repr(corner_data)\n with np.printoptions(suppress=True):\n repr_result = repr(arr)\n self.assertEqual(repr_corner_data, repr_result,\n f'failed when size == {i}')\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cloudpickle\nimport numpy as np\n\nfrom ...context import RunningMode\nfrom ...core import Object, OBJECT_TYPE, OBJECT_CHUNK_TYPE\nfrom ...serialize import KeyField, Int32Field, DictField, AnyField, BoolField\nfrom ...tiles import TilesError\nfrom ...tensor.core import TensorOrder\nfrom ...utils import check_chunks_unknown_shape, tokenize\nfrom ..operands import LearnOperand, LearnOperandMixin, OutputType\n\n\nclass TreeObject(Object):\n def fetch(self, session=None, **kw):\n result = self._data.fetch(session=session, **kw)\n return cloudpickle.loads(result) \\\n if isinstance(result, bytes) else result\n\n\nclass TreeBase(LearnOperand, LearnOperandMixin):\n _input = KeyField('input')\n _leaf_size = Int32Field('leaf_size')\n _metric = AnyField('metric')\n\n _metric_params = DictField('metric_params')\n\n def __init__(self, leaf_size=None, metric=None,\n metric_params=None, output_types=None, **kw):\n super().__init__(_leaf_size=leaf_size, _metric=metric,\n _metric_params=metric_params,\n _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.object]\n\n @property\n def input(self):\n return self._input\n\n @property\n def leaf_size(self):\n return self._leaf_size\n\n @property\n def metric(self):\n return self._metric\n\n @property\n def metric_params(self):\n return self._metric_params\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def __call__(self, a):\n return self.new_tileable([a])\n\n @classmethod\n def tile(cls, op):\n check_chunks_unknown_shape(op.inputs, TilesError)\n\n # ball tree and kd tree 
requires the full data,\n # thus rechunk input tensor into 1 chunk\n inp = op.input.rechunk({ax: s for ax, s in enumerate(op.input.shape)})\n inp = inp._inplace_tile()\n out = op.outputs[0]\n\n chunk_op = op.copy().reset_key()\n kw = out.params\n kw['index'] = inp.chunks[0].index\n chunk = chunk_op.new_chunk([inp.chunks[0]], kws=[kw])\n\n new_op = op.copy()\n tileable_kw = out.params\n tileable_kw['nsplits'] = ((1,),)\n tileable_kw['chunks'] = [chunk]\n return new_op.new_tileables(op.inputs, kws=[tileable_kw])\n\n @classmethod\n def execute(cls, ctx, op):\n if op.gpu: # pragma: no cover\n raise NotImplementedError('Does not support tree-based '\n 'nearest neighbors on GPU')\n\n a = ctx[op.input.key]\n tree = cls._tree_type(\n a, op.leaf_size, metric=op.metric,\n **(op.metric_params or dict()))\n if ctx.running_mode in [RunningMode.local_cluster, RunningMode.distributed]:\n # for local cluster and distributed, pickle always\n ctx[op.outputs[0].key] = cloudpickle.dumps(tree)\n else:\n # otherwise, to be clear for local, just put into storage directly\n ctx[op.outputs[0].key] = tree\n\n\ndef _on_serialize_tree(tree):\n return cloudpickle.dumps(tree) if not hasattr(tree, 'key') else tree\n\n\ndef _on_deserialize_tree(ser):\n return cloudpickle.loads(ser) if isinstance(ser, bytes) else ser\n\n\nclass TreeQueryBase(LearnOperand, LearnOperandMixin):\n _input = KeyField('input')\n _tree = AnyField('tree', on_serialize=_on_serialize_tree,\n on_deserialize=_on_deserialize_tree)\n _n_neighbors = Int32Field('n_neighbors')\n _return_distance = BoolField('return_distance')\n\n def __init__(self, tree=None, n_neighbors=None, return_distance=None,\n output_types=None, **kw):\n super().__init__(_tree=tree, _n_neighbors=n_neighbors,\n _return_distance=return_distance,\n _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.tensor] * self.output_limit\n\n @property\n def input(self):\n return self._input\n\n @property\n def tree(self):\n return self._tree\n\n @property\n def n_neighbors(self):\n return self._n_neighbors\n\n @property\n def return_distance(self):\n return self._return_distance\n\n @property\n def output_limit(self):\n return 2 if self._return_distance else 1\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._input = self._inputs[0]\n if isinstance(self._tree, (OBJECT_TYPE, OBJECT_CHUNK_TYPE)):\n self._tree = self._inputs[1]\n\n def _update_key(self):\n values = []\n for value in self._values_:\n if isinstance(value, self._tree_type):\n values.append(cloudpickle.dumps(value))\n else:\n values.append(value)\n self._obj_set('_key', tokenize(type(self).__name__, *values))\n return self\n\n def __call__(self, x):\n kws = []\n if self._return_distance:\n kws.append({'shape': (x.shape[0], self._n_neighbors),\n 'dtype': np.dtype(np.float64),\n 'order': x.order,\n 'type': 'distance'})\n kws.append({\n 'shape': (x.shape[0], self._n_neighbors),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'type': 'indices'\n })\n inputs = [x]\n if isinstance(self._tree, OBJECT_TYPE):\n inputs.append(self._tree)\n return self.new_tileables(inputs, kws=kws, output_limit=len(kws))\n\n @classmethod\n def tile(cls, op):\n inp = op.input\n\n if inp.chunk_shape[1] != 1:\n check_chunks_unknown_shape([inp], TilesError)\n inp = inp.rechunk({1: inp.shape[1]})._inplace_tile()\n\n tree_chunk = None\n if isinstance(op.tree, OBJECT_TYPE):\n tree_chunk = op.tree.chunks[0]\n out_chunks = [[] for _ in range(len(op.outputs))]\n for chunk in 
inp.chunks:\n chunk_op = op.copy().reset_key()\n if tree_chunk is not None:\n chunk_op._tree = tree_chunk\n chunk_kws = []\n if op.return_distance:\n chunk_kws.append({\n 'shape': (chunk.shape[0], op.n_neighbors),\n 'dtype': np.dtype(np.float64),\n 'order': chunk.order,\n 'index': chunk.index,\n 'type': 'distance'\n })\n chunk_kws.append({\n 'shape': (chunk.shape[0], op.n_neighbors),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'index': chunk.index,\n 'type': 'indices'\n })\n chunk_inputs = [chunk]\n if tree_chunk is not None:\n chunk_inputs.append(tree_chunk)\n chunks = chunk_op.new_chunks(chunk_inputs, kws=chunk_kws,\n output_limit=len(chunk_kws))\n for cs, c in zip(out_chunks, chunks):\n cs.append(c)\n\n kws = [o.params for o in op.outputs]\n nsplits = list(inp.nsplits)\n nsplits[1] = (op.n_neighbors,)\n if op.return_distance:\n kws[0]['chunks'] = out_chunks[0]\n kws[0]['nsplits'] = tuple(nsplits)\n kws[-1]['chunks'] = out_chunks[-1]\n kws[-1]['nsplits'] = tuple(nsplits)\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=kws, output_limit=len(kws))\n\n @classmethod\n def execute(cls, ctx, op):\n if op.gpu: # pragma: no cover\n raise NotImplementedError('Does not support tree-based '\n 'nearest neighbors on GPU')\n\n x = ctx[op.input.key]\n if len(op.inputs) == 2:\n tree = ctx[op.tree.key]\n else:\n tree = op.tree\n tree = cloudpickle.loads(tree) if isinstance(tree, bytes) else tree\n ret = tree.query(x, op.n_neighbors, op.return_distance)\n if op.return_distance:\n ctx[op.outputs[0].key] = ret[0]\n ctx[op.outputs[1].key] = ret[1]\n else:\n ctx[op.outputs[0].key] = ret\n", "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialize import AnyField\nfrom .core import TensorRandomOperandMixin, handle_array, TensorDistribution\n\n\nclass TensorRandBeta(TensorDistribution, TensorRandomOperandMixin):\n __slots__ = '_a', '_b', '_size'\n _input_fields_ = ['_a', '_b']\n _op_type_ = OperandDef.RAND_BETA\n\n _a = AnyField('a')\n _b = AnyField('b')\n _func_name = 'beta'\n\n def __init__(self, state=None, size=None, dtype=None, gpu=None, **kw):\n dtype = np.dtype(dtype) if dtype is not None else dtype\n super().__init__(_state=state, _size=size, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def a(self):\n return self._a\n\n @property\n def b(self):\n return self._b\n\n def __call__(self, a, b, chunk_size=None):\n return self.new_tensor([a, b], None, raw_chunk_size=chunk_size)\n\n\ndef beta(random_state, a, b, size=None, chunk_size=None, gpu=None, dtype=None):\n r\"\"\"\n Draw samples from a Beta distribution.\n\n The Beta distribution is a special case of the Dirichlet distribution,\n and is related to the Gamma distribution. It has the probability\n distribution function\n\n .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}\n (1 - x)^{\\beta - 1},\n\n where the normalisation, B, is the beta function,\n\n .. 
math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}\n (1 - t)^{\\beta - 1} dt.\n\n It is often seen in Bayesian inference and order statistics.\n\n Parameters\n ----------\n a : float or array_like of floats\n Alpha, non-negative.\n b : float or array_like of floats\n Beta, non-negative.\n size : int or tuple of ints, optional\n Output shape. If the given shape is, e.g., ``(m, n, k)``, then\n ``m * n * k`` samples are drawn. If size is ``None`` (default),\n a single value is returned if ``a`` and ``b`` are both scalars.\n Otherwise, ``mt.broadcast(a, b).size`` samples are drawn.\n chunk_size : int or tuple of int or tuple of ints, optional\n Desired chunk size on each dimension\n gpu : bool, optional\n Allocate the tensor on GPU if True, False as default\n dtype : data-type, optional\n Data-type of the returned tensor.\n\n Returns\n -------\n out : Tensor or scalar\n Drawn samples from the parameterized beta distribution.\n \"\"\"\n if dtype is None:\n dtype = np.random.RandomState().beta(\n handle_array(a), handle_array(b), size=(0,)).dtype\n size = random_state._handle_size(size)\n op = TensorRandBeta(state=random_state.to_numpy(), size=size, gpu=gpu, dtype=dtype)\n return op(a, b, chunk_size=chunk_size)\n" ]
[ [ "numpy.array", "numpy.linalg.LinAlgError" ], [ "numpy.printoptions", "numpy.get_printoptions", "numpy.testing.assert_array_equal", "numpy.random.rand", "numpy.array" ], [ "numpy.dtype" ], [ "numpy.random.RandomState", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hatrungduc/spark-nlp
[ "b38260543524507e34cbcb7fa2006923091634ad" ]
[ "python/tensorflow/lib/ner/embeddings_resolver.py" ]
[ "import shutil\nimport numpy as np\nimport plyvel\nimport os.path\nimport sys\nsys.path.append('../')\nfrom bert.modeling import *\nfrom bert.tokenization import *\nimport json\nimport os.path\nimport numpy as np\n\n\nclass TokenEmbeddings:\n def __init__(self, piece, is_word_start, vector):\n self.piece = piece\n self.is_word_start = is_word_start\n self.vector = vector\n \n @staticmethod\n def create_sentence(pieces, is_word_starts, embeddings):\n # Array of TokenEmbeddings\n return [TokenEmbeddings(piece, is_start, vector)\n for (piece, is_start, vector) in zip(pieces, is_word_starts, embeddings)]\n \n def __str__(self):\n return 'TokenEmbeddings({}, {}, [{}])'.format(self.piece, self.is_word_start, np.shape(self.vector))\n\n def __repr__(self):\n return self.__str__()\n\n\nclass EmbeddingsDbResolver:\n \n @staticmethod\n def get_index_name(prefix, dim):\n return prefix + '-' + str(dim)\n \n @staticmethod\n def read_from_file(glove_file, dim, index_file = 'embeddings_index', \n lowercase=False, clear_if_exists = False):\n \n full_index_file = EmbeddingsDbResolver.get_index_name(index_file, dim)\n try:\n resolver = None\n\n index_existed = os.path.exists(full_index_file) and not clear_if_exists\n resolver = EmbeddingsDbResolver(dim, index_file, lowercase, clear_if_exists)\n\n if not index_existed:\n resolver.read_glove(glove_file)\n\n return resolver\n except:\n if resolver and resolver.db:\n resolver.close()\n \n raise()\n \n def read_glove(self, glove_file):\n portion = 500000\n print('reading file: ', glove_file)\n wb = None\n with open(glove_file, encoding='utf-8') as f:\n for num, line in enumerate(f):\n items = line.split(' ')\n word = items[0]\n vector = [float(x) for x in items[1:]]\n if num % portion == portion - 1:\n print('read lines: {}'.format(num))\n wb.write()\n wb = None\n \n if not wb:\n wb = self.db.write_batch()\n\n self.add_vector(word, vector, wb)\n if wb:\n wb.write()\n \n def __init__(self, dim, index_file = 'embeddings_index', lowercase = False, clear_if_exists=False): \n full_index_file = EmbeddingsDbResolver.get_index_name(index_file, dim)\n \n if clear_if_exists and os.path.exists(full_index_file):\n shutil.rmtree(db_index)\n \n dummy_added = False\n self.db = plyvel.DB(full_index_file, create_if_missing=True)\n self.add_vector(\"__oov__\", [0.] * dim)\n self.lowercase = lowercase\n \n def get_embeddings(self, word):\n word = word.strip()\n if self.lowercase:\n word = word.lower()\n \n result = self.db.get(word.encode()) or self.db.get('__oov__'.encode())\n return np.frombuffer(result)\n \n def resolve_sentence(self, sentence):\n \"\"\"\n sentence - array of words\n \"\"\"\n embeddings = list([self.get_embeddings(word) for word in sentence])\n is_word_start = [True] * len(sentence)\n \n return TokenEmbeddings.create_sentence(sentence, is_word_start, embeddings)\n\n \n def add_vector(self, word, vector, wb = None):\n array = np.array(vector)\n if wb:\n wb.put(word.encode(), array.tobytes())\n else:\n self.db.put(word.encode(), array.tobytes())\n \n def close(self):\n self.db.close()\n \n\nclass BertEmbeddingsResolver:\n \n def __init__(self, model_folder, max_length = 256, lowercase = True):\n \n # 1. Create tokenizer\n self.max_length = max_length\n vocab_file = os.path.join(model_folder, 'vocab.txt')\n self.tokenizer = FullTokenizer(vocab_file, do_lower_case = lowercase)\n \n # 2. Read Config\n config_file = os.path.join(model_folder, 'bert_config.json') \n self.config = BertConfig.from_json_file(config_file)\n \n # 3. 
Create Model\n self.session = tf.Session()\n self.token_ids_op = tf.placeholder(tf.int32, shape=(None, max_length), name='token_ids')\n self.model = BertModel(config = self.config, \n is_training = False, \n input_ids = self.token_ids_op, \n use_one_hot_embeddings = False)\n \n # 4. Restore Trained Model\n self.saver = tf.train.Saver()\n ckpt_file = os.path.join(model_folder, 'bert_model.ckpt')\n self.saver.restore(self.session, ckpt_file)\n \n hidden_layers = self.config.num_hidden_layers\n self.embeddings_op = tf.get_default_graph().get_tensor_by_name(\n \"bert/encoder/Reshape_{}:0\".format(hidden_layers + 1))\n \n def tokenize_sentence(self, tokens, add_service_tokens = True):\n result = []\n is_word_start = []\n for token in tokens:\n pieces = self.tokenizer.tokenize(token)\n result.extend(pieces)\n starts = [False] * len(pieces)\n starts[0] = True\n is_word_start.extend(starts)\n\n if add_service_tokens:\n if len(result) > self.max_length - 2:\n result = result[:self.max_length -2]\n is_word_start = is_word_start[:self.max_length -2]\n \n result = ['[CLS]'] + result + ['[SEP]']\n is_word_start = [False] + is_word_start + [False]\n else:\n if len(result) > self.max_length:\n result = result[:self.max_length]\n is_word_start = is_word_start[:self.max_length]\n \n return (result, is_word_start)\n\n def resolve_sentences(self, sentences):\n batch_is_word_start = []\n batch_token_ids = []\n batch_tokens = []\n \n for sentence in sentences:\n tokens, is_word_start = self.tokenize_sentence(sentence)\n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n to_input = np.pad(token_ids, [(0, self.max_length - len(token_ids))], mode='constant')\n batch_token_ids.append(to_input)\n batch_tokens.append(tokens)\n batch_is_word_start.append(is_word_start)\n\n embeddings = self.session.run(self.embeddings_op, feed_dict = {self.token_ids_op: batch_token_ids})\n \n result = []\n for i in range(len(sentences)):\n tokens = batch_tokens[i]\n is_word_start = batch_is_word_start[i]\n item_embeddings = embeddings[i, :len(tokens), :]\n\n resolved = TokenEmbeddings.create_sentence(tokens, is_word_start, item_embeddings)\n result.append(resolved)\n \n return result\n\n \n def resolve_sentence(self, sentence):\n tokens, is_word_start = self.tokenize_sentence(sentence)\n \n token_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n to_input = np.pad(token_ids, [(0, self.max_length - len(token_ids))], mode='constant')\n to_input = to_input.reshape((1, self.max_length))\n\n embeddings = self.session.run(self.embeddings_op, feed_dict = {self.token_ids_op: to_input})\n embeddings = np.squeeze(embeddings)\n embeddings = embeddings[:len(token_ids), :]\n\n return TokenEmbeddings.create_sentence(tokens, is_word_start, embeddings)\n " ]
[ [ "numpy.frombuffer", "numpy.squeeze", "numpy.array", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
colibri-coruscans/pyGSTi
[ "da54f4abf668a28476030528f81afa46a1fbba33", "da54f4abf668a28476030528f81afa46a1fbba33", "da54f4abf668a28476030528f81afa46a1fbba33", "da54f4abf668a28476030528f81afa46a1fbba33", "da54f4abf668a28476030528f81afa46a1fbba33", "da54f4abf668a28476030528f81afa46a1fbba33" ]
[ "pygsti/algorithms/germselection.py", "pygsti/report/vbplot.py", "test/test_packages/drivers/testTimeDep.py", "pygsti/layouts/matrixlayout.py", "pygsti/report/colormaps.py", "pygsti/report/python.py" ]
[ "\"\"\"\nFunctions for selecting a complete set of germs for a GST analysis.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport warnings as _warnings\n\nimport numpy as _np\nimport numpy.linalg as _nla\n\nfrom pygsti.algorithms import grasp as _grasp\nfrom pygsti.algorithms import scoring as _scoring\nfrom pygsti import circuits as _circuits\nfrom pygsti import baseobjs as _baseobjs\nfrom pygsti.tools import mpitools as _mpit\n\nFLOATSIZE = 8 # in bytes: TODO: a better way\n\n\ndef find_germs(target_model, randomize=True, randomization_strength=1e-2,\n num_gs_copies=5, seed=None, candidate_germ_counts=None,\n candidate_seed=None, force=\"singletons\", algorithm='greedy',\n algorithm_kwargs=None, mem_limit=None, comm=None,\n profiler=None, verbosity=1):\n \"\"\"\n Generate a germ set for doing GST with a given target model.\n\n This function provides a streamlined interface to a variety of germ\n selection algorithms. It's goal is to provide a method that typical users\n can run by simply providing a target model and leaving all other settings\n at their default values, while providing flexibility for users desiring\n more control to fine tune some of the general and algorithm-specific\n details.\n\n Currently, to break troublesome degeneracies and provide some confidence\n that the chosen germ set is amplificationally complete (AC) for all\n models in a neighborhood of the target model (rather than only the\n target model), an ensemble of models with random unitary perturbations\n to their gates must be provided or generated.\n\n Parameters\n ----------\n target_model : Model or list of Model\n The model you are aiming to implement, or a list of models that are\n copies of the model you are trying to implement (either with or\n without random unitary perturbations applied to the models).\n\n randomize : bool, optional\n Whether or not to add random unitary perturbations to the model(s)\n provided.\n\n randomization_strength : float, optional\n The size of the random unitary perturbations applied to gates in the\n model. See :meth:`~pygsti.objects.Model.randomize_with_unitary`\n for more details.\n\n num_gs_copies : int, optional\n The number of copies of the original model that should be used.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models. Also\n passed along to stochastic germ-selection algorithms.\n\n candidate_germ_counts : dict, optional\n A dictionary of *germ_length* : *count* key-value pairs, specifying\n the germ \"candidate list\" - a list of potential germs to draw from.\n *count* is either an integer specifying the number of random germs\n considered at the given *germ_length* or the special values `\"all upto\"`\n that considers all of the of all non-equivalent germs of length up to\n the corresponding *germ_length*. 
If None, all germs of up to length\n 6 are used, the equivalent of `{6: 'all upto'}`.\n\n candidate_seed : int, optional\n A seed value used when randomly selecting candidate germs. For each\n germ length being randomly selected, the germ length is added to\n the value of `candidate_seed` to get the actual seed used.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. Seting to None is the same as an empty list.\n\n algorithm : {'greedy', 'grasp', 'slack'}, optional\n Specifies the algorithm to use to generate the germ set. Current\n options are:\n 'greedy'\n Add germs one-at-a-time until the set is AC, picking the germ that\n improves the germ-set score by the largest amount at each step. See\n :func:`find_germs_breadthfirst` for more details.\n 'grasp'\n Use GRASP to generate random greedy germ sets and then locally\n optimize them. See :func:`find_germs_grasp` for more\n details.\n 'slack'\n From a initial set of germs, add or remove a germ at each step in\n an attempt to improve the germ-set score. Will allow moves that\n degrade the score in an attempt to escape local optima as long as\n the degredation is within some specified amount of \"slack\". See\n :func:`find_germs_integer_slack` for more details.\n\n algorithm_kwargs : dict\n Dictionary of ``{'keyword': keyword_arg}`` pairs providing keyword\n arguments for the specified `algorithm` function. See the documentation\n for functions referred to in the `algorithm` keyword documentation for\n what options are available for each algorithm.\n\n mem_limit : int, optional\n A rough memory limit in bytes which restricts the amount of intermediate\n values that are computed and stored.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n profiler : Profiler, optional\n A profiler object used for to track timing and memory usage.\n\n verbosity : int, optional\n The verbosity level of the :class:`~pygsti.objects.VerbosityPrinter`\n used to print log messages.\n\n Returns\n -------\n list of Circuit\n A list containing the germs making up the germ set.\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)\n modelList = _setup_model_list(target_model, randomize,\n randomization_strength, num_gs_copies, seed)\n gates = list(target_model.operations.keys())\n availableGermsList = []\n if candidate_germ_counts is None: candidate_germ_counts = {6: 'all upto'}\n for germLength, count in candidate_germ_counts.items():\n if count == \"all upto\":\n availableGermsList.extend(_circuits.list_all_circuits_without_powers_and_cycles(\n gates, max_length=germLength))\n else:\n seed = None if candidate_seed is None else candidate_seed + germLength\n availableGermsList.extend(_circuits.list_random_circuits_onelen(\n gates, germLength, count, seed=seed))\n\n if algorithm_kwargs is None:\n # Avoid danger of using empty dict for default value.\n algorithm_kwargs = {}\n\n if algorithm == 'greedy':\n printer.log('Using greedy algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'score_func': 'all',\n 'comm': comm,\n 'mem_limit': mem_limit,\n 'profiler': profiler\n }\n for key in 
default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_breadthfirst(model_list=modelList,\n **algorithm_kwargs)\n if germList is not None:\n germsetScore = compute_germ_set_score(\n germList, neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log('Constructed germ set:', 1)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n elif algorithm == 'grasp':\n printer.log('Using GRASP algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'alpha': 0.1, # No real reason for setting this value of alpha.\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'return_all': False,\n 'score_func': 'all',\n }\n for key in default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_grasp(model_list=modelList,\n **algorithm_kwargs)\n printer.log('Constructed germ set:', 1)\n\n if algorithm_kwargs['return_all'] and germList[0] is not None:\n germsetScore = compute_germ_set_score(\n germList[0], neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log(str([germ.str for germ in germList[0]]), 1)\n printer.log('Score: {}'.format(germsetScore))\n elif not algorithm_kwargs['return_all'] and germList is not None:\n germsetScore = compute_germ_set_score(germList,\n neighborhood=modelList)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n elif algorithm == 'slack':\n printer.log('Using slack algorithm.', 1)\n # Define defaults for parameters that currently have no default or\n # whose default we want to change.\n default_kwargs = {\n 'germs_list': availableGermsList,\n 'randomize': False,\n 'seed': seed,\n 'verbosity': max(0, verbosity - 1),\n 'force': force,\n 'score_func': 'all',\n }\n if ('slack_frac' not in algorithm_kwargs\n and 'fixed_slack' not in algorithm_kwargs):\n algorithm_kwargs['slack_frac'] = 0.1\n for key in default_kwargs:\n if key not in algorithm_kwargs:\n algorithm_kwargs[key] = default_kwargs[key]\n germList = find_germs_integer_slack(modelList,\n **algorithm_kwargs)\n if germList is not None:\n germsetScore = compute_germ_set_score(\n germList, neighborhood=modelList,\n score_func=algorithm_kwargs['score_func'])\n printer.log('Constructed germ set:', 1)\n printer.log(str([germ.str for germ in germList]), 1)\n printer.log('Score: {}'.format(germsetScore), 1)\n else:\n raise ValueError(\"'{}' is not a valid algorithm \"\n \"identifier.\".format(algorithm))\n\n return germList\n\n\ndef compute_germ_set_score(germs, target_model=None, neighborhood=None,\n neighborhood_size=5,\n randomization_strength=1e-2, score_func='all',\n op_penalty=0.0, l1_penalty=0.0):\n \"\"\"\n Calculate the score of a germ set with respect to a model.\n\n More precisely, this function computes the maximum score (roughly equal\n to the number of amplified parameters) for a cloud of models.\n If `target_model` is given, it serves as the center of the cloud,\n otherwise the cloud must be supplied directly via `neighborhood`.\n\n\n Parameters\n ----------\n germs : list\n The germ set\n\n target_model : Model, optional\n The target model, used to generate a neighborhood of randomized models.\n\n neighborhood : list of Models, optional\n The \"cloud\" of models for which 
scores are computed. If not None, this\n overrides `target_model`, `neighborhood_size`, and `randomization_strength`.\n\n neighborhood_size : int, optional\n Number of randomized models to construct around `target_model`.\n\n randomization_strength : float, optional\n Strength of unitary randomizations, as passed to :method:`target_model.randomize_with_unitary`.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/input_array)``. If 'worst', score is ``1/min(input_array)``.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n l1_penalty : float, optional\n Coefficient for a penalty linear in the number of germs.\n\n Returns\n -------\n CompositeScore\n The maximum score for `germs`, indicating how many parameters it amplifies.\n \"\"\"\n def score_fn(x): return _scoring.list_score(x, score_func=score_func)\n if neighborhood is None:\n neighborhood = [target_model.randomize_with_unitary(randomization_strength)\n for n in range(neighborhood_size)]\n scores = [compute_composite_germ_set_score(score_fn, model=model,\n partial_germs_list=germs,\n op_penalty=op_penalty,\n l1_penalty=l1_penalty)\n for model in neighborhood]\n\n return max(scores)\n\n\ndef _get_model_params(model_list):\n \"\"\"\n Get the number of gates and gauge parameters of the models in a list.\n\n Also verifies all models have the same number of gates and gauge parameters.\n\n Parameters\n ----------\n model_list : list of Model\n A list of models for which you want an AC germ set.\n\n Returns\n -------\n reducedModelList : list of Model\n The original list of models with SPAM removed\n numGaugeParams : int\n The number of non-SPAM gauge parameters for all models.\n numNonGaugeParams : int\n The number of non-SPAM non-gauge parameters for all models.\n numOps : int\n The number of gates for all models.\n\n Raises\n ------\n ValueError\n If the number of gauge parameters or gates varies among the models.\n \"\"\"\n # We don't care about SPAM, since it can't be amplified.\n reducedModelList = [_remove_spam_vectors(model)\n for model in model_list]\n\n # All the models should have the same number of parameters and gates, but\n # let's be paranoid here for the time being and make sure.\n numGaugeParamsList = [reducedModel.num_gauge_params\n for reducedModel in reducedModelList]\n numGaugeParams = numGaugeParamsList[0]\n if not all([numGaugeParams == otherNumGaugeParams\n for otherNumGaugeParams in numGaugeParamsList[1:]]):\n raise ValueError(\"All models must have the same number of gauge \"\n \"parameters!\")\n\n numNonGaugeParamsList = [reducedModel.num_nongauge_params\n for reducedModel in reducedModelList]\n numNonGaugeParams = numNonGaugeParamsList[0]\n if not all([numNonGaugeParams == otherNumNonGaugeParams\n for otherNumNonGaugeParams in numNonGaugeParamsList[1:]]):\n raise ValueError(\"All models must have the same number of non-gauge \"\n \"parameters!\")\n\n numOpsList = [len(reducedModel.operations)\n for reducedModel in reducedModelList]\n numOps = numOpsList[0]\n if not all([numOps == otherNumOps\n for otherNumOps in numOpsList[1:]]):\n raise ValueError(\"All models must have the same number of gates!\")\n\n return reducedModelList, numGaugeParams, numNonGaugeParams, numOps\n\n\ndef _setup_model_list(model_list, randomize, randomization_strength,\n num_copies, seed):\n \"\"\"\n Sets up a list of randomize models (helper function).\n \"\"\"\n if not isinstance(model_list, (list, tuple)):\n 
model_list = [model_list]\n if len(model_list) > 1 and num_copies is not None:\n _warnings.warn(\"Ignoring num_copies={} since multiple models were \"\n \"supplied.\".format(num_copies))\n\n if randomize:\n model_list = randomize_model_list(model_list, randomization_strength,\n num_copies, seed)\n\n return model_list\n\n\ndef compute_composite_germ_set_score(score_fn, threshold_ac=1e6, init_n=1,\n partial_deriv_dagger_deriv=None, model=None,\n partial_germs_list=None, eps=None, num_gauge_params=None,\n op_penalty=0.0, germ_lengths=None, l1_penalty=0.0):\n \"\"\"\n Compute the score for a germ set when it is not AC against a model.\n\n Normally scores computed for germ sets against models for which they are\n not AC will simply be astronomically large. This is fine if AC is all you\n care about, but not so useful if you want to compare partial germ sets\n against one another to see which is closer to being AC. This function\n will see if the germ set is AC for the parameters corresponding to the\n largest `N` eigenvalues for increasing `N` until it finds a value of `N`\n for which the germ set is not AC or all the non gauge parameters are\n accounted for and report the value of `N` as well as the score.\n This allows partial germ set scores to be compared against one-another\n sensibly, where a larger value of `N` always beats a smaller value of `N`,\n and ties in the value of `N` are broken by the score for that value of `N`.\n\n Parameters\n ----------\n score_fn : callable\n A function that takes as input a list of sorted eigenvalues and returns\n a score for the partial germ set based on those eigenvalues, with lower\n scores indicating better germ sets. Usually some flavor of\n :func:`~pygsti.algorithms.scoring.list_score`.\n\n threshold_ac : float, optional\n Value which the score (before penalties are applied) must be lower than\n for the germ set to be considered AC.\n\n init_n : int\n The number of largest eigenvalues to begin with checking.\n\n partial_deriv_dagger_deriv : numpy.array, optional\n Array with three axes, where the first axis indexes individual germs\n within the partial germ set and the remaining axes index entries in the\n positive square of the Jacobian of each individual germ's parameters\n with respect to the model parameters.\n If this array is not supplied it will need to be computed from\n `germs_list` and `model`, which will take longer, so it is recommended\n to precompute this array if this routine will be called multiple times.\n\n model : Model, optional\n The model against which the germ set is to be scored. Not needed if\n `partial_deriv_dagger_deriv` is provided.\n\n partial_germs_list : list of Circuit, optional\n The list of germs in the partial germ set to be evaluated. Not needed\n if `partial_deriv_dagger_deriv` (and `germ_lengths` when\n ``op_penalty > 0``) are provided.\n\n eps : float, optional\n Used when calculating `partial_deriv_dagger_deriv` to determine if two\n eigenvalues are equal (see :func:`_bulk_twirled_deriv` for details). Not\n used if `partial_deriv_dagger_deriv` is provided.\n\n num_gauge_params : int\n The number of gauge parameters of the model. Not needed if `model`\n is provided.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n germ_lengths : numpy.array, optional\n The length of each germ. 
Not needed if `op_penalty` is ``0.0`` or\n `partial_germs_list` is provided.\n\n l1_penalty : float, optional\n Coefficient for a penalty linear in the number of germs.\n\n Returns\n -------\n CompositeScore\n The score for the germ set indicating how many parameters it amplifies\n and its numerical score restricted to those parameters.\n \"\"\"\n if partial_deriv_dagger_deriv is None:\n if model is None or partial_germs_list is None:\n raise ValueError(\"Must provide either partial_deriv_dagger_deriv or \"\n \"(model, partial_germs_list)!\")\n else:\n pDDD_kwargs = {'model': model, 'germs_list': partial_germs_list}\n if eps is not None:\n pDDD_kwargs['eps'] = eps\n if germ_lengths is not None:\n pDDD_kwargs['germ_lengths'] = germ_lengths\n partial_deriv_dagger_deriv = _compute_bulk_twirled_ddd(**pDDD_kwargs)\n\n if num_gauge_params is None:\n if model is None:\n raise ValueError(\"Must provide either num_gauge_params or model!\")\n else:\n num_gauge_params = _remove_spam_vectors(model).num_gauge_params\n\n # Calculate penalty scores\n numGerms = partial_deriv_dagger_deriv.shape[0]\n l1Score = l1_penalty * numGerms\n opScore = 0.0\n if op_penalty != 0.0:\n if germ_lengths is None:\n if partial_germs_list is None:\n raise ValueError(\"Must provide either germ_lengths or \"\n \"partial_germs_list when op_penalty != 0.0!\")\n else:\n germ_lengths = _np.array([len(germ)\n for germ in partial_germs_list])\n opScore = op_penalty * _np.sum(germ_lengths)\n\n combinedDDD = _np.sum(partial_deriv_dagger_deriv, axis=0)\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n observableEigenvals = sortedEigenvals[num_gauge_params:]\n N_AC = 0\n AC_score = _np.inf\n for N in range(init_n, len(observableEigenvals) + 1):\n scoredEigenvals = observableEigenvals[-N:]\n candidate_AC_score = score_fn(scoredEigenvals)\n if candidate_AC_score > threshold_ac:\n break # We've found a set of parameters for which the germ set\n # is not AC.\n else:\n AC_score = candidate_AC_score\n N_AC = N\n\n # OLD Apply penalties to the minor score; major part is just #amplified\n #major_score = N_AC\n #minor_score = AC_score + l1Score + opScore\n\n # Apply penalties to the major score\n major_score = -N_AC + opScore + l1Score\n minor_score = AC_score\n ret = _scoring.CompositeScore(major_score, minor_score, N_AC)\n #DEBUG: ret.extra = {'opScore': opScore,\n # 'sum(germ_lengths)': _np.sum(germ_lengths), 'l1': l1Score}\n return ret\n\n\ndef _compute_bulk_twirled_ddd(model, germs_list, eps=1e-6, check=False,\n germ_lengths=None, comm=None):\n \"\"\"\n Calculate the positive squares of the germ Jacobians.\n\n twirledDerivDaggerDeriv == array J.H*J contributions from each germ\n (J=Jacobian) indexed by (iGerm, iModelParam1, iModelParam2)\n size (nGerms, vec_model_dim, vec_model_dim)\n\n Parameters\n ----------\n model : Model\n The model defining the parameters to differentiate with respect to.\n\n germs_list : list\n The germ set\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? 
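The loop in `compute_composite_germ_set_score` above searches for the largest `N` such that the score of the `N` largest observable eigenvalues stays below `threshold_ac`. A self-contained numpy sketch of that search, with made-up eigenvalues:

import numpy as np

def n_ac_search_sketch(observable_eigenvalues, threshold_ac=1e6, init_n=1):
    """Return (N_AC, AC_score): how many of the largest eigenvalues can be
    scored before the score blows past threshold_ac (lower score = better)."""
    observable_eigenvalues = np.sort(np.asarray(observable_eigenvalues, float))
    N_AC, AC_score = 0, np.inf
    for N in range(init_n, len(observable_eigenvalues) + 1):
        top_N = observable_eigenvalues[-N:]             # N largest eigenvalues
        candidate = np.sum(1.0 / top_N)                 # 'all'-style score
        if candidate > threshold_ac:
            break                                       # set is not AC for N params
        AC_score, N_AC = candidate, N
    return N_AC, AC_score

# Three well-amplified directions and one essentially unamplified one:
print(n_ac_search_sketch([1e-9, 0.5, 1.0, 2.0]))        # -> (3, 3.5)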
)\n\n check : bool, optional\n Whether to perform internal consistency checks, at the expense of\n making the function slower.\n\n germ_lengths : numpy.ndarray, optional\n A pre-computed array of the length (depth) of each germ.\n\n comm : mpi4py.MPI.Comm, optional\n When not ``None``, an MPI communicator for distributing the computation\n across multiple processors.\n\n Returns\n -------\n twirledDerivDaggerDeriv : numpy.ndarray\n A complex array of shape `(len(germs), model.num_params, model.num_params)`.\n \"\"\"\n if germ_lengths is None:\n germ_lengths = _np.array([len(germ) for germ in germs_list])\n\n twirledDeriv = _bulk_twirled_deriv(model, germs_list, eps, check, comm) / germ_lengths[:, None, None]\n\n #OLD: slow, I think because conjugate *copies* a large tensor, causing a memory bottleneck\n #twirledDerivDaggerDeriv = _np.einsum('ijk,ijl->ikl',\n # _np.conjugate(twirledDeriv),\n # twirledDeriv)\n\n #NEW: faster, one-germ-at-a-time computation requires less memory.\n nGerms, _, vec_model_dim = twirledDeriv.shape\n twirledDerivDaggerDeriv = _np.empty((nGerms, vec_model_dim, vec_model_dim),\n dtype=_np.complex)\n for i in range(nGerms):\n twirledDerivDaggerDeriv[i, :, :] = _np.dot(\n twirledDeriv[i, :, :].conjugate().T, twirledDeriv[i, :, :])\n\n return twirledDerivDaggerDeriv\n\n\ndef _compute_twirled_ddd(model, germ, eps=1e-6):\n \"\"\"\n Calculate the positive squares of the germ Jacobian.\n\n twirledDerivDaggerDeriv == array J.H*J contributions from `germ`\n (J=Jacobian) indexed by (iModelParam1, iModelParam2)\n size (vec_model_dim, vec_model_dim)\n\n Parameters\n ----------\n model : Model\n The model defining the parameters to differentiate with respect to.\n\n germ : Circuit\n The (single) germ circuit to consider. `J` above is the twirled\n derivative of this circuit's action (process matrix).\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? )\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n twirledDeriv = _twirled_deriv(model, germ, eps) / len(germ)\n #twirledDerivDaggerDeriv = _np.einsum('jk,jl->kl',\n # _np.conjugate(twirledDeriv),\n # twirledDeriv)\n twirledDerivDaggerDeriv = _np.tensordot(_np.conjugate(twirledDeriv),\n twirledDeriv, (0, 0))\n\n return twirledDerivDaggerDeriv\n\n\ndef _germ_set_score_slack(weights, model_num, score_func, deriv_dagger_deriv_list,\n force_indices, force_score,\n n_gauge_params, op_penalty, germ_lengths, l1_penalty=1e-2,\n score_dict=None):\n \"\"\"\n Returns a germ set \"score\" in which smaller is better.\n\n Also returns intentionally bad score (`force_score`) if `weights` is zero on any of\n the \"forced\" germs (i.e. at any index in `forcedIndices`).\n This function is included for use by :func:`find_germs_integer_slack`,\n but is not convenient for just computing the score of a germ set. For that,\n use :func:`compute_germ_set_score`.\n\n Parameters\n ----------\n weights : list\n The per-germ \"selection weight\", indicating whether the germ\n is present in the selected germ set or not.\n\n model_num : int\n index into `deriv_dagger_deriv_list` indicating which model (typically in\n a neighborhood) we're computing scores for.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/input_array)``. 
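The per-germ loop in `_compute_bulk_twirled_ddd` computes the same stack of J^dagger.J products as the einsum it replaced, one germ at a time; the sketch below checks that equivalence on random data (the shapes are illustrative).

import numpy as np

rng = np.random.default_rng(0)
nGerms, flat_op_dim, Np = 4, 9, 12
twirledDeriv = (rng.normal(size=(nGerms, flat_op_dim, Np))
                + 1j * rng.normal(size=(nGerms, flat_op_dim, Np)))

# One-germ-at-a-time J^dagger.J, as in the loop above (lower peak memory).
loop_result = np.empty((nGerms, Np, Np), dtype=complex)
for i in range(nGerms):
    loop_result[i] = twirledDeriv[i].conjugate().T @ twirledDeriv[i]

# The einsum form it replaced gives identical values.
einsum_result = np.einsum('ijk,ijl->ikl', np.conjugate(twirledDeriv), twirledDeriv)
assert np.allclose(loop_result, einsum_result)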
If 'worst', score is ``1/min(input_array)``.\n\n deriv_dagger_deriv_list : numpy.ndarray\n Array of J.T * J contributions for each model.\n\n force_indices : list of ints\n Indices marking the germs that *must* be in the final set (or else `force_score`\n will be returned).\n\n force_score : float\n The score that is returned when any of the germs indexed by `force_indices` are\n not present (i.e. their weights are <= 0).\n\n n_gauge_params : int\n The number of gauge (not amplifiable) parameters in the model.\n\n op_penalty : float\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n germ_lengths : numpy.ndarray\n A pre-computed array of the length (depth) of each germ.\n\n l1_penalty : float\n Coefficient for a penalty linear in the number of germs.\n\n score_dict : dict, optional\n A dictionary to cache the score valies for the given `model_num` and\n `weights`, i.e. `score_dict[model_num, tuple(weights)]` is set to the\n returned value.\n\n Returns\n -------\n float\n \"\"\"\n if force_indices is not None and _np.any(weights[force_indices] <= 0):\n score = force_score\n else:\n #combinedDDD = _np.einsum('i,ijk', weights,\n # deriv_dagger_deriv_list[model_num])\n combinedDDD = _np.squeeze(\n _np.tensordot(_np.expand_dims(weights, 1),\n deriv_dagger_deriv_list[model_num], (0, 0)))\n assert len(combinedDDD.shape) == 2\n\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n observableEigenvals = sortedEigenvals[n_gauge_params:]\n score = (_scoring.list_score(observableEigenvals, score_func)\n + l1_penalty * _np.sum(weights)\n + op_penalty * _np.dot(germ_lengths, weights))\n if score_dict is not None:\n # Side effect: calling _germ_set_score_slack caches result in score_dict\n score_dict[model_num, tuple(weights)] = score\n return score\n\n\ndef randomize_model_list(model_list, randomization_strength, num_copies,\n seed=None):\n \"\"\"\n Applies random unitary perturbations to a model or list of models.\n\n If `model_list` is a length-1 list, then `num_copies` determines how\n many randomizations to create. If `model_list` containes multiple\n models, then `num_copies` must be `None` and each model is\n randomized once to create the corresponding returned model.\n\n Parameters\n ----------\n model_list : Model or list\n A list of Model objects.\n\n randomization_strength : float, optional\n Strength of unitary randomizations, as passed to :method:`Model.randomize_with_unitary`.\n\n num_copies : int\n The number of random perturbations of `model_list[0]` to generate when\n `len(model_list) == 1`. A value of `None` will result in 1 copy. If\n `len(model_list) > 1` then `num_copies` must be set to None.\n\n seed : int, optional\n Starting seed for randomization. Successive randomizations receive\n successive seeds. 
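A toy illustration of the penalty bookkeeping in `_germ_set_score_slack`: the L1 term charges for each selected germ and the op term charges for each gate in the selected germs. The eigenvalue part of the score is replaced by a made-up number here.

import numpy as np

weights      = np.array([1, 0, 1, 1])      # which germs are currently selected
germ_lengths = np.array([1, 1, 3, 5])      # depth of each germ
l1_penalty, op_penalty = 1e-2, 1e-3

eigenvalue_score = 4.2                                  # pretend list_score output
score = (eigenvalue_score
         + l1_penalty * np.sum(weights)                 # + 0.03: 3 germs selected
         + op_penalty * np.dot(germ_lengths, weights))  # + 0.009: 9 gates total
print(score)                                            # 4.239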
`None` results in random seeds.\n\n Returns\n -------\n list\n A list of the randomized Models.\n \"\"\"\n if len(model_list) > 1 and num_copies is not None:\n raise ValueError(\"Input multiple models XOR request multiple \"\n \"copies only!\")\n\n newmodelList = []\n if len(model_list) > 1:\n for modelnum, model in enumerate(model_list):\n newmodelList.append(model.randomize_with_unitary(\n randomization_strength,\n seed=None if seed is None else seed + modelnum))\n else:\n for modelnum in range(num_copies if num_copies is not None else 1):\n newmodelList.append(model_list[0].randomize_with_unitary(\n randomization_strength,\n seed=None if seed is None else seed + modelnum))\n return newmodelList\n\n\ndef test_germs_list_completeness(model_list, germs_list, score_func, threshold):\n \"\"\"\n Check to see if the germs_list is amplificationally complete (AC).\n\n Checks for AC with respect to all the Models in `model_list`, returning\n the index of the first Model for which it is not AC or `-1` if it is AC\n for all Models.\n\n Parameters\n ----------\n model_list : list\n A list of models to test. Often this list is a neighborhood (\"cloud\") of\n models around a model of interest.\n\n germs_list : list\n A list of the germ :class:`Circuit`s (the \"germ set\") to test for completeness.\n\n score_func : {'all', 'worst'}\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigval_array)``. If 'worst', score is ``1/min(eigval_array)``.\n\n threshold : float, optional\n An eigenvalue of jacobian^T*jacobian is considered zero and thus a\n parameter un-amplified when its reciprocal is greater than threshold.\n Also used for eigenvector degeneracy testing in twirling operation.\n\n Returns\n -------\n int\n The index of the first model in `model_list` to fail the amplficational\n completeness test.\n \"\"\"\n for modelNum, model in enumerate(model_list):\n initial_test = test_germ_set_infl(model, germs_list,\n score_func=score_func,\n threshold=threshold)\n if not initial_test:\n return modelNum\n\n # If the germs_list is complete for all models, return -1\n return -1\n\n\ndef _remove_spam_vectors(model):\n \"\"\"\n Returns a copy of `model` with state preparations and effects removed.\n\n Parameters\n ----------\n model : Model\n The model to act on.\n\n Returns\n -------\n Model\n \"\"\"\n reducedModel = model.copy()\n for prepLabel in list(reducedModel.preps.keys()):\n del reducedModel.preps[prepLabel]\n for povmLabel in list(reducedModel.povms.keys()):\n del reducedModel.povms[povmLabel]\n return reducedModel\n\n\ndef _num_non_spam_gauge_params(model):\n \"\"\"\n Return the number of non-gauge, non-SPAM parameters in `model`.\n\n Equivalent to `_remove_spam_vectors(model).num_gauge_params`.\n\n Parameters\n ---------\n model : Model\n\n Parameters\n ----------\n model : Model\n The model to act on.\n\n Returns\n -------\n int\n \"\"\"\n return _remove_spam_vectors(model).num_gauge_params\n\n\n# wrt is op_dim x op_dim, so is M, Minv, Proj\n# so SOP is op_dim^2 x op_dim^2 and acts on vectorized *gates*\n# Recall vectorizing identity (when vec(.) 
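`test_germs_list_completeness` uses a simple return convention: the index of the first failing model, or -1 when every model passes. A minimal sketch of that convention, with strings standing in for models and a lambda standing in for `test_germ_set_infl`:

def first_failure_index_sketch(model_list, passes):
    """`passes` stands in for test_germ_set_infl(model, ...)."""
    for modelNum, model in enumerate(model_list):
        if not passes(model):
            return modelNum
    return -1

print(first_failure_index_sketch(['mdl0', 'mdl1', 'mdl2'],
                                 passes=lambda m: m != 'mdl1'))   # -> 1
print(first_failure_index_sketch(['mdl0', 'mdl1'],
                                 passes=lambda m: True))          # -> -1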
concats rows as flatten does):\n# vec( A * X * B ) = A tensor B^T * vec( X )\ndef _super_op_for_perfect_twirl(wrt, eps):\n \"\"\"Return super operator for doing a perfect twirl with respect to wrt.\n \"\"\"\n assert wrt.shape[0] == wrt.shape[1] # only square matrices allowed\n dim = wrt.shape[0]\n SuperOp = _np.zeros((dim**2, dim**2), 'complex')\n\n # Get spectrum and eigenvectors of wrt\n wrtEvals, wrtEvecs = _np.linalg.eig(wrt)\n wrtEvecsInv = _np.linalg.inv(wrtEvecs)\n\n # We want to project X -> M * (Proj_i * (Minv * X * M) * Proj_i) * Minv,\n # where M = wrtEvecs. So A = B = M * Proj_i * Minv and so\n # superop = A tensor B^T == A tensor A^T\n # NOTE: this == (A^T tensor A)^T while *Maple* germ functions seem to just\n # use A^T tensor A -> ^T difference\n for i in range(dim):\n # Create projector onto i-th eigenspace (spanned by i-th eigenvector\n # and other degenerate eigenvectors)\n Proj_i = _np.diag([(1 if (abs(wrtEvals[i] - wrtEvals[j]) <= eps)\n else 0) for j in range(dim)])\n A = _np.dot(wrtEvecs, _np.dot(Proj_i, wrtEvecsInv))\n #if _np.linalg.norm(A.imag) > 1e-6:\n # print(\"DB: imag = \",_np.linalg.norm(A.imag))\n #assert(_np.linalg.norm(A.imag) < 1e-6)\n #A = _np.real(A)\n # Need to normalize, because we are overcounting projectors onto\n # subspaces of dimension d > 1, giving us d * Proj_i tensor Proj_i^T.\n # We can fix this with a division by tr(Proj_i) = d.\n SuperOp += _np.kron(A, A.T) / _np.trace(Proj_i)\n # SuperOp += _np.kron(A.T,A) # Mimic Maple version (but I think this is\n # wrong... or it doesn't matter?)\n return SuperOp # a op_dim^2 x op_dim^2 matrix\n\n\ndef _sq_sing_vals_from_deriv(deriv, weights=None):\n \"\"\"\n Calculate the squared singular values of the Jacobian of the germ set.\n\n Parameters\n ----------\n deriv : numpy.array\n Array of shape ``(nGerms, flattened_op_dim, vec_model_dim)``. Each\n sub-array corresponding to an individual germ is the Jacobian of the\n vectorized gate representation of that germ raised to some power with\n respect to the model parameters, normalized by dividing by the length\n of each germ after repetition.\n\n weights : numpy.array\n Array of length ``nGerms``, giving the relative contributions of each\n individual germ's Jacobian to the combined Jacobian (which is calculated\n as a convex combination of the individual Jacobians).\n\n Returns\n -------\n numpy.array\n The sorted squared singular values of the combined Jacobian of the germ\n set.\n \"\"\"\n # shape (nGerms, vec_model_dim, vec_model_dim)\n derivDaggerDeriv = _np.einsum('ijk,ijl->ikl', _np.conjugate(deriv), deriv)\n # awkward to convert to tensordot, so leave as einsum\n\n # Take the average of the D^dagger*D/L^2 matrices associated with each germ\n # with optional weights.\n combinedDDD = _np.average(derivDaggerDeriv, weights=weights, axis=0)\n sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))\n\n return sortedEigenvals\n\n\ndef _twirled_deriv(model, circuit, eps=1e-6):\n \"\"\"\n Compute the \"Twirled Derivative\" of a circuit.\n\n The twirled derivative is obtained by acting on the standard derivative of\n a circuit with the twirling superoperator.\n\n Parameters\n ----------\n model : Model object\n The Model which associates operation labels with operators.\n\n circuit : Circuit object\n A twirled derivative of this circuit's action (process matrix) is taken.\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? 
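The vectorization identity quoted in the comment above can be checked numerically: with `vec(.)` taken as a row-major flatten, `vec(A @ X @ B)` equals `kron(A, B.T) @ vec(X)`.

import numpy as np

rng = np.random.default_rng(1)
d = 3
A = rng.normal(size=(d, d))
X = rng.normal(size=(d, d))
B = rng.normal(size=(d, d))

lhs = (A @ X @ B).flatten()              # row-major vec of A*X*B
rhs = np.kron(A, B.T) @ X.flatten()
assert np.allclose(lhs, rhs)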
)\n\n Returns\n -------\n numpy array\n An array of shape (op_dim^2, num_model_params)\n \"\"\"\n prod = model.sim.product(circuit)\n\n # flattened_op_dim x vec_model_dim\n dProd = model.sim.dproduct(circuit, flat=True)\n\n # flattened_op_dim x flattened_op_dim\n twirler = _super_op_for_perfect_twirl(prod, eps)\n\n # flattened_op_dim x vec_model_dim\n return _np.dot(twirler, dProd)\n\n\ndef _bulk_twirled_deriv(model, circuits, eps=1e-6, check=False, comm=None):\n \"\"\"\n Compute the \"Twirled Derivative\" of a set of circuits.\n\n The twirled derivative is obtained by acting on the standard derivative of\n a circuit with the twirling superoperator.\n\n Parameters\n ----------\n model : Model object\n The Model which associates operation labels with operators.\n\n circuits : list of Circuit objects\n A twirled derivative of this circuit's action (process matrix) is taken.\n\n eps : float, optional\n Tolerance used for testing whether two eigenvectors are degenerate\n (i.e. abs(eval1 - eval2) < eps ? )\n\n check : bool, optional\n Whether to perform internal consistency checks, at the expense of\n making the function slower.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n Returns\n -------\n numpy array\n An array of shape (num_simplified_circuits, op_dim^2, num_model_params)\n \"\"\"\n if len(model.preps) > 0 or len(model.povms) > 0:\n model = _remove_spam_vectors(model)\n # This function assumes model has no spam elements so `lookup` below\n # gives indexes into products computed by evalTree.\n\n resource_alloc = _baseobjs.ResourceAllocation(comm=comm)\n dProds, prods = model.sim.bulk_dproduct(circuits, flat=True, return_prods=True, resource_alloc=resource_alloc)\n op_dim = model.dim\n fd = op_dim**2 # flattened gate dimension\n nCircuits = len(circuits)\n\n ret = _np.empty((nCircuits, fd, dProds.shape[1]), 'complex')\n for i in range(nCircuits):\n # flattened_op_dim x flattened_op_dim\n twirler = _super_op_for_perfect_twirl(prods[i], eps)\n\n # flattened_op_dim x vec_model_dim\n ret[i] = _np.dot(twirler, dProds[i * fd:(i + 1) * fd])\n\n if check:\n for i, circuit in enumerate(circuits):\n chk_ret = _twirled_deriv(model, circuit, eps)\n if _nla.norm(ret[i] - chk_ret) > 1e-6:\n _warnings.warn(\"bulk twirled derivative norm mismatch = \"\n \"%g - %g = %g\"\n % (_nla.norm(ret[i]), _nla.norm(chk_ret),\n _nla.norm(ret[i] - chk_ret))) # pragma: no cover\n\n return ret # nSimplifiedCircuits x flattened_op_dim x vec_model_dim\n\n\ndef test_germ_set_finitel(model, germs_to_test, length, weights=None,\n return_spectrum=False, tol=1e-6):\n \"\"\"\n Test whether a set of germs is able to amplify all non-gauge parameters.\n\n Parameters\n ----------\n model : Model\n The Model (associates operation matrices with operation labels).\n\n germs_to_test : list of Circuits\n List of germ circuits to test for completeness.\n\n length : int\n The finite length to use in amplification testing. Larger\n values take longer to compute but give more robust results.\n\n weights : numpy array, optional\n A 1-D array of weights with length equal len(germs_to_test),\n which multiply the contribution of each germ to the total\n jacobian matrix determining parameter amplification. 
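A small reimplementation of `_super_op_for_perfect_twirl`, used here only to check two properties one expects of the twirling superoperator that `_twirled_deriv` applies: it is idempotent, and `wrt` itself (vectorized) is a fixed point. The 2x2 example matrix is arbitrary.

import numpy as np

def twirl_superop_sketch(wrt, eps=1e-6):
    dim = wrt.shape[0]
    evals, evecs = np.linalg.eig(wrt)
    evecs_inv = np.linalg.inv(evecs)
    superop = np.zeros((dim**2, dim**2), dtype=complex)
    for i in range(dim):
        # Projector onto the i-th eigenspace (degenerate eigenvalues grouped).
        proj_i = np.diag([1.0 if abs(evals[i] - evals[j]) <= eps else 0.0
                          for j in range(dim)])
        A = evecs @ proj_i @ evecs_inv
        superop += np.kron(A, A.T) / np.trace(proj_i)
    return superop

wrt = np.array([[0.0, -1.0], [1.0, 0.0]])             # simple non-degenerate example
S = twirl_superop_sketch(wrt)
assert np.allclose(S @ S, S)                          # twirling twice changes nothing
assert np.allclose(S @ wrt.flatten(), wrt.flatten())  # wrt is a fixed point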
If\n None, a uniform weighting of 1.0/len(germs_to_test) is applied.\n\n return_spectrum : bool, optional\n If True, return the jacobian^T*jacobian spectrum in addition\n to the success flag.\n\n tol : float, optional\n Tolerance: an eigenvalue of jacobian^T*jacobian is considered\n zero and thus a parameter un-amplified when it is less than tol.\n\n Returns\n -------\n success : bool\n Whether all non-gauge parameters were amplified.\n spectrum : numpy array\n Only returned when `return_spectrum` is ``True``. Sorted array of\n eigenvalues (from small to large) of the jacobian^T * jacobian\n matrix used to determine parameter amplification.\n \"\"\"\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model = _remove_spam_vectors(model)\n\n nGerms = len(germs_to_test)\n germToPowL = [germ * length for germ in germs_to_test]\n\n op_dim = model.dim\n dprods = model.sim.bulk_dproduct(germToPowL, flat=True) # shape (nGerms*flattened_op_dim, vec_model_dim)\n dprods.shape = (nGerms, op_dim**2, dprods.shape[1])\n\n germLengths = _np.array([len(germ) for germ in germs_to_test], 'd')\n\n normalizedDeriv = dprods / (length * germLengths[:, None, None])\n\n sortedEigenvals = _sq_sing_vals_from_deriv(normalizedDeriv, weights)\n\n nGaugeParams = model.num_gauge_params\n\n observableEigenvals = sortedEigenvals[nGaugeParams:]\n\n bSuccess = bool(_scoring.list_score(observableEigenvals, 'worst') < 1 / tol)\n\n return (bSuccess, sortedEigenvals) if return_spectrum else bSuccess\n\n\ndef test_germ_set_infl(model, germs_to_test, score_func='all', weights=None,\n return_spectrum=False, threshold=1e6, check=False):\n \"\"\"\n Test whether a set of germs is able to amplify all non-gauge parameters.\n\n Parameters\n ----------\n model : Model\n The Model (associates operation matrices with operation labels).\n\n germs_to_test : list of Circuit\n List of germ circuits to test for completeness.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n weights : numpy array, optional\n A 1-D array of weights with length equal len(germs_to_test),\n which multiply the contribution of each germ to the total\n jacobian matrix determining parameter amplification. If\n None, a uniform weighting of 1.0/len(germs_to_test) is applied.\n\n return_spectrum : bool, optional\n If ``True``, return the jacobian^T*jacobian spectrum in addition\n to the success flag.\n\n threshold : float, optional\n An eigenvalue of jacobian^T*jacobian is considered zero and thus a\n parameter un-amplified when its reciprocal is greater than threshold.\n Also used for eigenvector degeneracy testing in twirling operation.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n Returns\n -------\n success : bool\n Whether all non-gauge parameters were amplified.\n spectrum : numpy array\n Only returned when `return_spectrum` is ``True``. 
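The final pass/fail check in `test_germ_set_finitel` discards the gauge directions and then applies the 'worst' scoring convention, so success means even the smallest observable eigenvalue exceeds `tol`. A toy version with a hand-written spectrum:

import numpy as np

def amplification_check_sketch(sorted_eigenvalues, n_gauge_params, tol=1e-6):
    observable = np.asarray(sorted_eigenvalues)[n_gauge_params:]
    worst_score = 1.0 / np.min(observable)          # 'worst' scoring convention
    return bool(worst_score < 1.0 / tol)

spectrum = np.array([1e-16, 1e-16, 0.02, 0.4, 1.3])   # two gauge-like near-zeros
print(amplification_check_sketch(spectrum, n_gauge_params=2))   # True
print(amplification_check_sketch(spectrum, n_gauge_params=1))   # False: a ~0 eigenvalue remains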
Sorted array of\n eigenvalues (from small to large) of the jacobian^T * jacobian\n matrix used to determine parameter amplification.\n \"\"\"\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model = _remove_spam_vectors(model)\n\n germLengths = _np.array([len(germ) for germ in germs_to_test], _np.int64)\n twirledDerivDaggerDeriv = _compute_bulk_twirled_ddd(model, germs_to_test,\n 1. / threshold, check,\n germLengths)\n # result[i] = _np.dot( twirledDeriv[i].H, twirledDeriv[i] ) i.e. matrix\n # product\n # result[i,k,l] = sum_j twirledDerivH[i,k,j] * twirledDeriv(i,j,l)\n # result[i,k,l] = sum_j twirledDeriv_conj[i,j,k] * twirledDeriv(i,j,l)\n\n if weights is None:\n nGerms = len(germs_to_test)\n # weights = _np.array( [1.0/nGerms]*nGerms, 'd')\n weights = _np.array([1.0] * nGerms, 'd')\n\n #combinedTDDD = _np.einsum('i,ijk->jk', weights, twirledDerivDaggerDeriv)\n combinedTDDD = _np.tensordot(weights, twirledDerivDaggerDeriv, (0, 0))\n sortedEigenvals = _np.sort(_np.real(_np.linalg.eigvalsh(combinedTDDD)))\n\n nGaugeParams = model.num_gauge_params\n observableEigenvals = sortedEigenvals[nGaugeParams:]\n\n bSuccess = bool(_scoring.list_score(observableEigenvals, score_func)\n < threshold)\n\n return (bSuccess, sortedEigenvals) if return_spectrum else bSuccess\n\n\ndef find_germs_depthfirst(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None, seed=0, op_penalty=0,\n score_func='all', tol=1e-6, threshold=1e6, check=False,\n force=\"singletons\", verbosity=0):\n \"\"\"\n Greedy germ selection algorithm starting with 0 germs.\n\n Tries to minimize the number of germs needed to achieve amplificational\n completeness (AC). Begins with 0 germs and adds the germ that increases the\n score used to check for AC by the largest amount at each step, stopping when\n the threshold for AC is achieved.\n\n Parameters\n ----------\n model_list : Model or list\n The model or list of `Model`s to select germs for.\n\n germs_list : list of Circuit\n The list of germs to contruct a germ set from.\n\n randomize : bool, optional\n Whether or not to randomize `model_list` (usually just a single\n `Model`) with small (see `randomizationStrengh`) unitary maps\n in order to avoid \"accidental\" symmetries which could allow for\n fewer germs but *only* for that particular model. Setting this\n to `True` will increase the run time by a factor equal to the\n numer of randomized copies (`num_copies`).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n\n num_copies : int, optional\n The number of randomized models to create when only a *single* gate\n set is passed via `model_list`. Otherwise, `num_copies` must be set\n to `None`.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n score_func : {'all', 'worst'}, optional\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigenvalues)``. 
If 'worst', score is\n ``1/min(eiganvalues)``.\n\n tol : float, optional\n Tolerance (`eps` arg) for :func:`_compute_bulk_twirled_ddd`, which sets\n the differece between eigenvalues below which they're treated as\n degenerate.\n\n threshold : float, optional\n Value which the score (before penalties are applied) must be lower than\n for a germ set to be considered AC.\n\n check : bool, optional\n Whether to perform internal checks (will slow down run time\n substantially).\n\n force : list of Circuits\n A list of `Circuit` objects which *must* be included in the final\n germ set. If the special string \"singletons\" is given, then all of\n the single gates (length-1 sequences) must be included.\n\n verbosity : int, optional\n Level of detail printed to stdout.\n\n Returns\n -------\n list\n A list of the built-up germ set (a list of :class:`Circuit` objects).\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n (reducedModelList,\n numGaugeParams, _, _) = _get_model_params(model_list)\n\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n numGerms = len(germs_list)\n\n weights = _np.zeros(numGerms, _np.int64)\n goodGerms = []\n if force:\n if force == \"singletons\":\n weights[_np.where(germLengths == 1)] = 1\n goodGerms = [germ for germ\n in _np.array(germs_list)[_np.where(germLengths == 1)]]\n else: # force should be a list of Circuits\n for opstr in force:\n weights[germs_list.index(opstr)] = 1\n goodGerms = force[:]\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \". Aborting search.\")\n return None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n printer.log(\"Starting germ set optimization. 
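A sketch of the `force="singletons"` seeding used above: every length-1 germ starts in the selected set and everything else starts deselected. Plain strings stand in for pygsti `Circuit` objects.

import numpy as np

germs_list   = ['Gx', 'Gy', 'GxGy', 'GxGxGy']
germ_lengths = np.array([1, 1, 2, 3])

weights = np.zeros(len(germs_list), dtype=np.int64)
weights[np.where(germ_lengths == 1)] = 1
good_germs = [g for g, L in zip(germs_list, germ_lengths) if L == 1]

print(weights)       # [1 1 0 0]
print(good_germs)    # ['Gx', 'Gy']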
Lower score is better.\", 1)\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths)\n for model in model_list]\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n for modelNum, reducedModel in enumerate(reducedModelList):\n derivDaggerDeriv = twirledDerivDaggerDerivList[modelNum]\n # Make sure the set of germs you come up with is AC for all\n # models.\n # Remove any SPAM vectors from model since we only want\n # to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n while _np.any(weights == 0):\n\n # As long as there are some unused germs, see if you need to add\n # another one.\n if test_germ_set_infl(reducedModel, goodGerms,\n score_func=score_func, threshold=threshold):\n # The germs are sufficient for the current model\n break\n candidateGerms = _np.where(weights == 0)[0]\n candidateGermScores = []\n for candidateGermIdx in _np.where(weights == 0)[0]:\n # If the germs aren't sufficient, try adding a single germ\n candidateWeights = weights.copy()\n candidateWeights[candidateGermIdx] = 1\n partialDDD = derivDaggerDeriv[\n _np.where(candidateWeights == 1)[0], :, :]\n candidateGermScore = compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=partialDDD, **nonAC_kwargs)\n candidateGermScores.append(candidateGermScore)\n # Add the germ that give the best score\n bestCandidateGerm = candidateGerms[_np.array(\n candidateGermScores).argmin()]\n weights[bestCandidateGerm] = 1\n goodGerms.append(germs_list[bestCandidateGerm])\n\n return goodGerms\n\n\ndef find_germs_breadthfirst(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None, seed=0,\n op_penalty=0, score_func='all', tol=1e-6, threshold=1e6,\n check=False, force=\"singletons\", pretest=True, mem_limit=None,\n comm=None, profiler=None, verbosity=0):\n \"\"\"\n Greedy algorithm starting with 0 germs.\n\n Tries to minimize the number of germs needed to achieve amplificational\n completeness (AC). Begins with 0 germs and adds the germ that increases the\n score used to check for AC by the largest amount (for the model that\n currently has the lowest score) at each step, stopping when the threshold\n for AC is achieved. This strategy is something of a \"breadth-first\"\n approach, in contrast to :func:`find_germs_depthfirst`, which only looks at the\n scores for one model at a time until that model achieves AC, then\n turning it's attention to the remaining models.\n\n Parameters\n ----------\n model_list : Model or list\n The model or list of `Model`s to select germs for.\n\n germs_list : list of Circuit\n The list of germs to contruct a germ set from.\n\n randomize : bool, optional\n Whether or not to randomize `model_list` (usually just a single\n `Model`) with small (see `randomizationStrengh`) unitary maps\n in order to avoid \"accidental\" symmetries which could allow for\n fewer germs but *only* for that particular model. 
Setting this\n to `True` will increase the run time by a factor equal to the\n numer of randomized copies (`num_copies`).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n\n num_copies : int, optional\n The number of randomized models to create when only a *single* gate\n set is passed via `model_list`. Otherwise, `num_copies` must be set\n to `None`.\n\n seed : int, optional\n Seed for generating random unitary perturbations to models.\n\n op_penalty : float, optional\n Coefficient for a penalty linear in the sum of the germ lengths.\n\n score_func : {'all', 'worst'}, optional\n Sets the objective function for scoring the eigenvalues. If 'all',\n score is ``sum(1/eigenvalues)``. If 'worst', score is\n ``1/min(eiganvalues)``.\n\n tol : float, optional\n Tolerance (`eps` arg) for :func:`_compute_bulk_twirled_ddd`, which sets\n the differece between eigenvalues below which they're treated as\n degenerate.\n\n threshold : float, optional\n Value which the score (before penalties are applied) must be lower than\n for a germ set to be considered AC.\n\n check : bool, optional\n Whether to perform internal checks (will slow down run time\n substantially).\n\n force : list of Circuits\n A list of `Circuit` objects which *must* be included in the final\n germ set. If the special string \"singletons\" is given, then all of\n the single gates (length-1 sequences) must be included.\n\n pretest : boolean, optional\n Whether germ list should be initially checked for completeness.\n\n mem_limit : int, optional\n A rough memory limit in bytes which restricts the amount of intermediate\n values that are computed and stored.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n profiler : Profiler, optional\n A profiler object used for to track timing and memory usage.\n\n verbosity : int, optional\n Level of detail printed to stdout.\n\n Returns\n -------\n list\n A list of the built-up germ set (a list of :class:`Circuit` objects).\n \"\"\"\n if comm is not None and comm.Get_size() > 1:\n from mpi4py import MPI # not at top so pygsti doesn't require mpi4py\n\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n dim = model_list[0].dim\n #Np = model_list[0].num_params #wrong:? 
includes spam...\n Np = model_list[0].num_params\n #print(\"DB Np = %d, Ng = %d\" % (Np,Ng))\n assert(all([(mdl.dim == dim) for mdl in model_list])), \\\n \"All models must have the same dimension!\"\n #assert(all([(mdl.num_params == Np) for mdl in model_list])), \\\n # \"All models must have the same number of parameters!\"\n\n (_, numGaugeParams,\n numNonGaugeParams, _) = _get_model_params(model_list)\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n numGerms = len(germs_list)\n\n goodGerms = []\n weights = _np.zeros(numGerms, _np.int64)\n if force:\n if force == \"singletons\":\n weights[_np.where(germLengths == 1)] = 1\n goodGerms = [germ for i, germ in enumerate(germs_list) if germLengths[i] == 1]\n else: # force should be a list of Circuits\n for opstr in force:\n weights[germs_list.index(opstr)] = 1\n goodGerms = force[:]\n\n if pretest:\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\")\n printer.warning(\"Aborting search.\")\n return None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n printer.log(\"Starting germ set optimization. Lower score is better.\", 1)\n\n mode = \"all-Jac\" # compute a all the possible germ's jacobians at once up\n # front and store them separately (requires lots of mem)\n\n if mem_limit is not None:\n memEstimate = FLOATSIZE * len(model_list) * len(germs_list) * Np**2\n # for _compute_bulk_twirled_ddd\n memEstimate += FLOATSIZE * len(model_list) * len(germs_list) * dim**2 * Np\n # for _bulk_twirled_deriv sub-call\n printer.log(\"Memory estimate of %.1f GB (%.1f GB limit) for all-Jac mode.\" %\n (memEstimate / 1024.0**3, mem_limit / 1024.0**3), 1)\n\n if memEstimate > mem_limit:\n mode = \"single-Jac\" # compute a single germ's jacobian at a time\n # and store the needed J-sum over chosen germs.\n memEstimate = FLOATSIZE * 3 * len(model_list) * Np**2 + \\\n FLOATSIZE * 3 * len(model_list) * dim**2 * Np\n #Factor of 3 accounts for currentDDDs, testDDDs, and bestDDDs\n printer.log(\"Memory estimate of %.1f GB (%.1f GB limit) for single-Jac mode.\" %\n (memEstimate / 1024.0**3, mem_limit / 1024.0**3), 1)\n\n if memEstimate > mem_limit:\n raise MemoryError(\"Too little memory, even for single-Jac mode!\")\n\n twirledDerivDaggerDerivList = None\n\n if mode == \"all-Jac\":\n twirledDerivDaggerDerivList = \\\n [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths, comm)\n for model in model_list]\n\n currentDDDList = []\n for i, derivDaggerDeriv in enumerate(twirledDerivDaggerDerivList):\n currentDDDList.append(_np.sum(derivDaggerDeriv[_np.where(weights == 1)[0], :, :], axis=0))\n\n elif mode == \"single-Jac\":\n currentDDDList = [_np.zeros((Np, Np), 'complex') for mdl in model_list]\n\n loc_Indices, _, _ = _mpit.distribute_indices(\n list(range(len(goodGerms))), comm, False)\n\n with printer.progress_logging(3):\n for i, goodGermIdx in enumerate(loc_Indices):\n printer.show_progress(i, len(loc_Indices),\n prefix=\"Initial germ set computation\",\n suffix=germs_list[goodGermIdx].str)\n #print(\"DB: Rank%d computing initial index %d\" % (comm.Get_rank(),goodGermIdx))\n\n for k, model in enumerate(model_list):\n currentDDDList[k] += _compute_twirled_ddd(\n model, germs_list[goodGermIdx], tol)\n\n #aggregate each currendDDDList across all 
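Rough arithmetic behind the all-Jac versus single-Jac decision above, assuming 8-byte floats (`FLOATSIZE` is defined elsewhere in the module; the value 8 and the problem sizes below are assumptions for illustration only).

FLOATSIZE = 8                                  # assumed bytes per float
n_models, n_germs, Np, dim = 5, 200, 60, 4     # illustrative sizes

all_jac_bytes = (FLOATSIZE * n_models * n_germs * Np**2          # cached J^dagger.J per germ
                 + FLOATSIZE * n_models * n_germs * dim**2 * Np) # bulk twirled derivatives
single_jac_bytes = (FLOATSIZE * 3 * n_models * Np**2
                    + FLOATSIZE * 3 * n_models * dim**2 * Np)    # current/test/best DDDs only

print(all_jac_bytes / 1024.0**2, "MiB for all-Jac")       # ~35 MiB
print(single_jac_bytes / 1024.0**2, "MiB for single-Jac") # ~0.5 MiB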
procs\n if comm is not None and comm.Get_size() > 1:\n for k, model in enumerate(model_list):\n result = _np.empty((Np, Np), 'complex')\n comm.Allreduce(currentDDDList[k], result, op=MPI.SUM)\n currentDDDList[k][:, :] = result[:, :]\n result = None # free mem\n\n else: # should be unreachable since we set 'mode' internally above\n raise ValueError(\"Invalid mode: %s\" % mode) # pragma: no cover\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n initN = 1\n while _np.any(weights == 0):\n printer.log(\"Outer iteration: %d of %d amplified, %d germs\" %\n (initN, numNonGaugeParams, len(goodGerms)), 2)\n # As long as there are some unused germs, see if you need to add\n # another one.\n if initN == numNonGaugeParams:\n break # We are AC for all models, so we can stop adding germs.\n\n candidateGermIndices = _np.where(weights == 0)[0]\n loc_candidateIndices, owners, _ = _mpit.distribute_indices(\n candidateGermIndices, comm, False)\n\n # Since the germs aren't sufficient, add the best single candidate germ\n bestDDDs = None\n bestGermScore = _scoring.CompositeScore(1.0e100, 0, None) # lower is better\n iBestCandidateGerm = None\n with printer.progress_logging(3):\n for i, candidateGermIdx in enumerate(loc_candidateIndices):\n printer.show_progress(i, len(loc_candidateIndices),\n prefix=\"Inner iter over candidate germs\",\n suffix=germs_list[candidateGermIdx].str)\n\n #print(\"DB: Rank%d computing index %d\" % (comm.Get_rank(),candidateGermIdx))\n worstScore = _scoring.CompositeScore(-1.0e100, 0, None) # worst of all models\n\n # Loop over all models\n testDDDs = []\n for k, currentDDD in enumerate(currentDDDList):\n testDDD = currentDDD.copy()\n\n if mode == \"all-Jac\":\n #just get cached value of deriv-dagger-deriv\n derivDaggerDeriv = twirledDerivDaggerDerivList[k][candidateGermIdx]\n testDDD += derivDaggerDeriv\n\n elif mode == \"single-Jac\":\n #compute value of deriv-dagger-deriv\n model = model_list[k]\n testDDD += _compute_twirled_ddd(\n model, germs_list[candidateGermIdx], tol)\n # (else already checked above)\n\n nonAC_kwargs['germ_lengths'] = \\\n _np.array([len(germ) for germ in\n (goodGerms + [germs_list[candidateGermIdx]])])\n worstScore = max(worstScore, compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=testDDD[None, :, :], init_n=initN,\n **nonAC_kwargs))\n testDDDs.append(testDDD) # save in case this is a keeper\n\n # Take the score for the current germ to be its worst score\n # over all the models.\n germScore = worstScore\n printer.log(str(germScore), 4)\n if germScore < bestGermScore:\n bestGermScore = germScore\n iBestCandidateGerm = candidateGermIdx\n bestDDDs = testDDDs\n testDDDs = None\n\n # Add the germ that gives the best germ score\n if comm is not None and comm.Get_size() > 1:\n #figure out which processor has best germ score and distribute\n # its information to the rest of the procs\n globalMinScore = comm.allreduce(bestGermScore, op=MPI.MIN)\n toSend = comm.Get_rank() if (globalMinScore == bestGermScore) \\\n else comm.Get_size() + 1\n winningRank = comm.allreduce(toSend, op=MPI.MIN)\n bestGermScore = globalMinScore\n toCast = iBestCandidateGerm if (comm.Get_rank() == winningRank) else None\n iBestCandidateGerm = comm.bcast(toCast, root=winningRank)\n for k in 
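The inner loop above follows a minimax rule: each candidate germ is scored by its worst case over the models, and the candidate with the best worst case is added. A scalar sketch of that rule (the real code compares `CompositeScore` objects rather than floats):

scores_per_candidate = {
    'germA': [3.0, 9.0],    # score of the tentative set against each model
    'germB': [5.0, 6.0],
    'germC': [4.0, 7.5],
}

best_germ = min(scores_per_candidate,
                key=lambda g: max(scores_per_candidate[g]))   # minimize the worst case
print(best_germ)    # 'germB': its worst-case score (6.0) beats 9.0 and 7.5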
range(len(model_list)):\n comm.Bcast(bestDDDs[k], root=winningRank)\n\n #Update variables for next outer iteration\n weights[iBestCandidateGerm] = 1\n initN = bestGermScore.N\n goodGerms.append(germs_list[iBestCandidateGerm])\n\n for k in range(len(model_list)):\n currentDDDList[k][:, :] = bestDDDs[k][:, :]\n bestDDDs[k] = None\n\n printer.log(\"Added %s to final germs (%s)\" %\n (germs_list[iBestCandidateGerm].str, str(bestGermScore)), 3)\n\n return goodGerms\n\n\n#@profile\ndef find_germs_integer_slack(model_list, germs_list, randomize=True,\n randomization_strength=1e-3, num_copies=None,\n seed=0, l1_penalty=1e-2, op_penalty=0,\n initial_weights=None, score_func='all',\n max_iter=100, fixed_slack=False,\n slack_frac=False, return_all=False, tol=1e-6,\n check=False, force=\"singletons\",\n force_score=1e100, threshold=1e6,\n verbosity=1):\n \"\"\"\n Find a locally optimal subset of the germs in germs_list.\n\n Locally optimal here means that no single germ can be excluded\n without making the smallest non-gauge eigenvalue of the\n Jacobian.H*Jacobian matrix smaller, i.e. less amplified,\n by more than a fixed or variable amount of \"slack\", as\n specified by `fixed_slack` or `slack_frac`.\n\n Parameters\n ----------\n model_list : Model or list of Model\n The list of Models to be tested. To ensure that the returned germ\n set is amplficationally complete, it is a good idea to score potential\n germ sets against a collection (~5-10) of similar models. The user\n may specify a single Model and a number of unitarily close copies to\n be made (set by the kwarg `num_copies`), or the user may specify their\n own list of Models, each of which in turn may or may not be\n randomized (set by the kwarg `randomize`).\n\n germs_list : list of Circuit\n List of all germ circuits to consider.\n\n randomize : Bool, optional\n Whether or not the input Model(s) are first subject to unitary\n randomization. If ``False``, the user should perform the unitary\n randomization themselves. Note: If the Model(s) are perfect (e.g.\n ``std1Q_XYI.target_model()``), then the germ selection output should not be\n trusted, due to accidental degeneracies in the Model. If the\n Model(s) include stochastic (non-unitary) error, then germ selection\n will fail, as we score amplificational completeness in the limit of\n infinite sequence length (so any stochastic noise will completely\n depolarize any sequence in that limit). Default is ``True``.\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n Default is ``1e-3``.\n\n num_copies : int, optional\n The number of Model copies to be made of the input Model (prior to\n unitary randomization). If more than one Model is passed in,\n `num_copies` should be ``None``. If only one Model is passed in and\n `num_copies` is ``None``, no extra copies are made.\n\n seed : float, optional\n The starting seed used for unitary randomization. If multiple Models\n are to be randomized, ``model_list[i]`` is randomized with ``seed +\n i``. Default is 0.\n\n l1_penalty : float, optional\n How strong the penalty should be for increasing the germ set list by a\n single germ. Default is 1e-2.\n\n op_penalty : float, optional\n How strong the penalty should be for increasing a germ in the germ set\n list by a single gate. 
Default is 0.\n\n initial_weights : list-like\n List or array of either booleans or (0 or 1) integers\n specifying which germs in `germ_list` comprise the initial\n germ set. If ``None``, then starting point includes all\n germs.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n max_iter : int, optional\n The maximum number of iterations before giving up.\n\n fixed_slack : float, optional\n If not ``None``, a floating point number which specifies that excluding\n a germ is allowed to increase 1.0/smallest-non-gauge-eigenvalue by\n `fixed_slack`. You must specify *either* `fixed_slack` or `slack_frac`.\n\n slack_frac : float, optional\n If not ``None``, a floating point number which specifies that excluding\n a germ is allowed to increase 1.0/smallest-non-gauge-eigenvalue by\n `fixedFrac`*100 percent. You must specify *either* `fixed_slack` or\n `slack_frac`.\n\n return_all : bool, optional\n If ``True``, return the final ``weights`` vector and score dictionary\n in addition to the optimal germ list (see below).\n\n tol : float, optional\n Tolerance used for eigenvector degeneracy testing in twirling\n operation.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. Seting to None is the same as an empty list.\n\n force_score : float, optional (default is 1e100)\n When `force` designates a non-empty set of circuits, the score to\n assign any germ set that does not contain each and every required germ.\n\n threshold : float, optional (default is 1e6)\n Specifies a maximum score for the score matrix, above which the germ\n set is rejected as amplificationally incomplete.\n\n verbosity : int, optional\n Integer >= 0 indicating the amount of detail to print.\n\n See Also\n --------\n :class:`~pygsti.objects.Model`\n :class:`~pygsti.objects.Circuit`\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n if (fixed_slack and slack_frac) or (not fixed_slack and not slack_frac):\n raise ValueError(\"Either fixed_slack *or* slack_frac should be specified\")\n\n if initial_weights is not None:\n if len(germs_list) != len(initial_weights):\n raise ValueError(\"The lengths of germs_list (%d) and \"\n \"initial_weights (%d) must match.\"\n % (len(germs_list), len(initial_weights)))\n # Normalize the weights array to be 0s and 1s even if it is provided as\n # bools\n weights = _np.array([1 if x else 0 for x in initial_weights])\n else:\n weights = _np.ones(len(germs_list), _np.int64) # default: start with all germs\n# lessWeightOnly = True # we're starting at the max-weight vector\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list, score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.log(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\", 1)\n printer.log(\"Aborting search.\", 1)\n return (None, None, None) if return_all else None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n num_models = len(model_list)\n\n # Remove any SPAM vectors from model since we only want\n 
# to consider the set of *gate* parameters for amplification\n # and this makes sure our parameter counting is correct\n model0 = _remove_spam_vectors(model_list[0])\n\n # Initially allow adding to weight. -- maybe make this an argument??\n lessWeightOnly = False\n\n nGaugeParams = model0.num_gauge_params\n\n # score dictionary:\n # keys = (modelNum, tuple-ized weight vector of 1's and 0's only)\n # values = list_score\n scoreD = {}\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n if force:\n if force == \"singletons\":\n forceIndices = _np.where(germLengths == 1)\n else: # force should be a list of Circuits\n forceIndices = _np.array([germs_list.index(opstr) for opstr in force])\n else:\n forceIndices = None\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol)\n for model in model_list]\n\n # Dict of keyword arguments passed to _germ_set_score_slack that don't change from\n # call to call\n cs_kwargs = {\n 'score_func': score_func,\n 'deriv_dagger_deriv_list': twirledDerivDaggerDerivList,\n 'force_indices': forceIndices,\n 'force_score': force_score,\n 'n_gauge_params': nGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n 'l1_penalty': l1_penalty,\n 'score_dict': scoreD,\n }\n\n scoreList = [_germ_set_score_slack(weights, model_num, **cs_kwargs)\n for model_num in range(num_models)]\n score = _np.max(scoreList)\n L1 = sum(weights) # ~ L1 norm of weights\n\n printer.log(\"Starting germ set optimization. Lower score is better.\", 1)\n printer.log(\"Model has %d gauge params.\" % nGaugeParams, 1)\n\n def _get_neighbors(bool_vec):\n for i in range(len(bool_vec)):\n v = bool_vec.copy()\n v[i] = (v[i] + 1) % 2 # Toggle v[i] btwn 0 and 1\n yield v\n\n with printer.progress_logging(1):\n for iIter in range(max_iter):\n printer.show_progress(iIter, max_iter,\n suffix=\"score=%g, nGerms=%d\" % (score, L1))\n\n bFoundBetterNeighbor = False\n for neighbor in _get_neighbors(weights):\n neighborScoreList = []\n for model_num in range(len(model_list)):\n if (model_num, tuple(neighbor)) not in scoreD:\n neighborL1 = sum(neighbor)\n neighborScoreList.append(_germ_set_score_slack(neighbor,\n model_num,\n **cs_kwargs))\n else:\n neighborL1 = sum(neighbor)\n neighborScoreList.append(scoreD[model_num,\n tuple(neighbor)])\n\n neighborScore = _np.max(neighborScoreList) # Take worst case.\n # Move if we've found better position; if we've relaxed, we\n # only move when L1 is improved.\n if neighborScore <= score and (neighborL1 < L1 or not lessWeightOnly):\n weights, score, L1 = neighbor, neighborScore, neighborL1\n bFoundBetterNeighbor = True\n\n printer.log(\"Found better neighbor: \"\n \"nGerms = %d score = %g\" % (L1, score), 2)\n\n if not bFoundBetterNeighbor: # Time to relax our search.\n # From now on, don't allow increasing weight L1\n lessWeightOnly = True\n\n if fixed_slack is False:\n # Note score is positive (for sum of 1/lambda)\n slack = score * slack_frac\n # print \"slack =\", slack\n else:\n slack = fixed_slack\n assert slack > 0\n\n printer.log(\"No better neighbor. 
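The slack search explores the Hamming-distance-1 neighborhood of the current weight vector, toggling one germ in or out per neighbor, exactly as `_get_neighbors` above does; a standalone copy of that generator for illustration:

import numpy as np

def get_neighbors_sketch(bool_vec):
    for i in range(len(bool_vec)):
        v = bool_vec.copy()
        v[i] = (v[i] + 1) % 2          # toggle germ i in or out
        yield v

weights = np.array([1, 1, 0])
print([list(v) for v in get_neighbors_sketch(weights)])
# [[0, 1, 0], [1, 0, 0], [1, 1, 1]]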
Relaxing score w/slack: \"\n + \"%g => %g\" % (score, score + slack), 2)\n # Artificially increase score and see if any neighbor is better\n # now...\n score += slack\n\n for neighbor in _get_neighbors(weights):\n scoreList = [scoreD[model_num, tuple(neighbor)]\n for model_num in range(len(model_list))]\n maxScore = _np.max(scoreList)\n if sum(neighbor) < L1 and maxScore < score:\n weights, score, L1 = neighbor, maxScore, sum(neighbor)\n bFoundBetterNeighbor = True\n printer.log(\"Found better neighbor: \"\n \"nGerms = %d score = %g\" % (L1, score), 2)\n\n if not bFoundBetterNeighbor: # Relaxing didn't help!\n printer.log(\"Stationary point found!\", 1)\n break # end main for loop\n\n printer.log(\"Moving to better neighbor\", 1)\n # print score\n else:\n printer.log(\"Hit max. iterations\", 1)\n\n printer.log(\"score = %s\" % score, 1)\n printer.log(\"weights = %s\" % weights, 1)\n printer.log(\"L1(weights) = %s\" % sum(weights), 1)\n\n goodGerms = []\n for index, val in enumerate(weights):\n if val == 1:\n goodGerms.append(germs_list[index])\n\n if return_all:\n return goodGerms, weights, scoreD\n else:\n return goodGerms\n\n\ndef _germ_set_score_grasp(germ_set, germs_list, twirled_deriv_dagger_deriv_list,\n non_ac_kwargs, init_n=1):\n \"\"\"\n Score a germ set against a collection of models.\n\n Calculate the score of the germ set with respect to each member of a\n collection of models and return the worst score among that collection.\n\n Parameters\n ----------\n germ_set : list of Circuit\n The set of germs to score.\n\n germs_list : list of Circuit\n The list of all germs whose Jacobians are provided in\n `twirled_deriv_dagger_deriv_list`.\n\n twirled_deriv_dagger_deriv_list : numpy.array\n Jacobians for all the germs in `germs_list` stored as a 3-dimensional\n array, where the first index indexes the particular germ.\n\n non_ac_kwargs : dict\n Dictionary containing further arguments to pass to\n :func:`compute_composite_germ_set_score` for the scoring of the germ set against\n individual models.\n\n init_n : int\n The number of eigenvalues to begin checking for amplificational\n completeness with respect to. Passed as an argument to\n :func:`compute_composite_germ_set_score`.\n\n Returns\n -------\n CompositeScore\n The worst score over all models of the germ set.\n \"\"\"\n weights = _np.zeros(len(germs_list))\n for germ in germ_set:\n weights[germs_list.index(germ)] = 1\n germsVsModelScores = []\n for derivDaggerDeriv in twirled_deriv_dagger_deriv_list:\n # Loop over all models\n partialDDD = derivDaggerDeriv[_np.where(weights == 1)[0], :, :]\n germsVsModelScores.append(compute_composite_germ_set_score(\n partial_deriv_dagger_deriv=partialDDD, init_n=init_n, **non_ac_kwargs))\n # Take the score for the current germ set to be its worst score over all\n # models.\n return max(germsVsModelScores)\n\n\ndef find_germs_grasp(model_list, germs_list, alpha, randomize=True,\n randomization_strength=1e-3, num_copies=None,\n seed=None, l1_penalty=1e-2, op_penalty=0.0,\n score_func='all', tol=1e-6, threshold=1e6,\n check=False, force=\"singletons\",\n iterations=5, return_all=False, shuffle=False,\n verbosity=0):\n \"\"\"\n Use GRASP to find a high-performing germ set.\n\n Parameters\n ----------\n model_list : Model or list of Model\n The list of Models to be tested. To ensure that the returned germ\n set is amplficationally complete, it is a good idea to score potential\n germ sets against a collection (~5-10) of similar models. 
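A simplified scalar analogue of how `alpha` is described as setting the restricted candidate list (RCL) threshold; the actual rule operates on composite scores via `pygsti.algorithms.scoring.filter_composite_rcl`, so the linear interpolation below is an assumption made purely for illustration.

import numpy as np

def rcl_sketch(candidate_scores, alpha):
    scores = np.asarray(candidate_scores, dtype=float)   # lower is better
    best, worst = scores.min(), scores.max()
    threshold = best + alpha * (worst - best)
    return np.where(scores <= threshold)[0]

scores = [2.0, 3.5, 5.0, 9.0]
print(rcl_sketch(scores, alpha=0.0))   # [0]          -> purely greedy
print(rcl_sketch(scores, alpha=0.5))   # [0 1 2]      -> within half the spread
print(rcl_sketch(scores, alpha=1.0))   # [0 1 2 3]    -> every candidate admitted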
The user\n may specify a single Model and a number of unitarily close copies to\n be made (set by the kwarg `num_copies`, or the user may specify their\n own list of Models, each of which in turn may or may not be\n randomized (set by the kwarg `randomize`).\n\n germs_list : list of Circuit\n List of all germ circuits to consider.\n\n alpha : float\n A number between 0 and 1 that roughly specifies a score theshold\n relative to the spread of scores that a germ must score better than in\n order to be included in the RCL. A value of 0 for `alpha` corresponds\n to a purely greedy algorithm (only the best-scoring germ set is\n included in the RCL), while a value of 1 for `alpha` will include all\n germs in the RCL.\n See :func:`pygsti.algorithms.scoring.filter_composite_rcl` for more details.\n\n randomize : Bool, optional\n Whether or not the input Model(s) are first subject to unitary\n randomization. If ``False``, the user should perform the unitary\n randomization themselves. Note: If the Model(s) are perfect (e.g.\n ``std1Q_XYI.target_model()``), then the germ selection output should not be\n trusted, due to accidental degeneracies in the Model. If the\n Model(s) include stochastic (non-unitary) error, then germ selection\n will fail, as we score amplificational completeness in the limit of\n infinite sequence length (so any stochastic noise will completely\n depolarize any sequence in that limit).\n\n randomization_strength : float, optional\n The strength of the unitary noise used to randomize input Model(s);\n is passed to :func:`~pygsti.objects.Model.randomize_with_unitary`.\n Default is ``1e-3``.\n\n num_copies : int, optional\n The number of Model copies to be made of the input Model (prior to\n unitary randomization). If more than one Model is passed in,\n `num_copies` should be ``None``. If only one Model is passed in and\n `num_copies` is ``None``, no extra copies are made.\n\n seed : float, optional\n The starting seed used for unitary randomization. If multiple Models\n are to be randomized, ``model_list[i]`` is randomized with ``seed +\n i``.\n\n l1_penalty : float, optional\n How strong the penalty should be for increasing the germ set list by a\n single germ. Used for choosing between outputs of various GRASP\n iterations.\n\n op_penalty : float, optional\n How strong the penalty should be for increasing a germ in the germ set\n list by a single gate.\n\n score_func : string\n Label to indicate how a germ set is scored. See\n :func:`~pygsti.algorithms.scoring.list_score` for details.\n\n tol : float, optional\n Tolerance used for eigenvector degeneracy testing in twirling\n operation.\n\n threshold : float, optional (default is 1e6)\n Specifies a maximum score for the score matrix, above which the germ\n set is rejected as amplificationally incomplete.\n\n check : bool, optional\n Whether to perform internal consistency checks, at the\n expense of making the function slower.\n\n force : str or list, optional\n A list of Circuits which *must* be included in the final germ set.\n If set to the special string \"singletons\" then all length-1 strings will\n be included. 
Seting to None is the same as an empty list.\n\n iterations : int, optional\n The number of GRASP iterations to perform.\n\n return_all : bool, optional\n Flag set to tell the routine if it should return lists of all\n initial constructions and local optimizations in addition to the\n optimal solution (useful for diagnostic purposes or if you're not sure\n what your `finalScoreFn` should really be).\n\n shuffle : bool, optional\n Whether the neighborhood should be presented to the optimizer in a\n random order (important since currently the local optimizer updates the\n solution to the first better solution it finds in the neighborhood).\n\n verbosity : int, optional\n Integer >= 0 indicating the amount of detail to print.\n\n Returns\n -------\n finalGermList : list of Circuit\n Sublist of `germs_list` specifying the final, optimal set of germs.\n \"\"\"\n printer = _baseobjs.VerbosityPrinter.create_printer(verbosity)\n\n model_list = _setup_model_list(model_list, randomize,\n randomization_strength, num_copies, seed)\n\n (_, numGaugeParams,\n numNonGaugeParams, _) = _get_model_params(model_list)\n\n germLengths = _np.array([len(germ) for germ in germs_list], _np.int64)\n\n numGerms = len(germs_list)\n\n initialWeights = _np.zeros(numGerms, dtype=_np.int64)\n if force:\n if force == \"singletons\":\n initialWeights[_np.where(germLengths == 1)] = 1\n else: # force should be a list of Circuits\n for opstr in force:\n initialWeights[germs_list.index(opstr)] = 1\n\n def get_neighbors_fn(weights): return _grasp.get_swap_neighbors(\n weights, forced_weights=initialWeights, shuffle=shuffle)\n\n undercompleteModelNum = test_germs_list_completeness(model_list,\n germs_list,\n score_func,\n threshold)\n if undercompleteModelNum > -1:\n printer.warning(\"Complete initial germ set FAILS on model \"\n + str(undercompleteModelNum) + \".\")\n printer.warning(\"Aborting search.\")\n return (None, None, None) if return_all else None\n\n printer.log(\"Complete initial germ set succeeds on all input models.\", 1)\n printer.log(\"Now searching for best germ set.\", 1)\n\n printer.log(\"Starting germ set optimization. 
Lower score is better.\", 1)\n\n twirledDerivDaggerDerivList = [_compute_bulk_twirled_ddd(model, germs_list, tol,\n check, germLengths)\n for model in model_list]\n\n # Dict of keyword arguments passed to compute_score_non_AC that don't\n # change from call to call\n nonAC_kwargs = {\n 'score_fn': lambda x: _scoring.list_score(x, score_func=score_func),\n 'threshold_ac': threshold,\n 'num_gauge_params': numGaugeParams,\n 'op_penalty': op_penalty,\n 'germ_lengths': germLengths,\n }\n\n final_nonAC_kwargs = nonAC_kwargs.copy()\n final_nonAC_kwargs['l1_penalty'] = l1_penalty\n\n scoreFn = (lambda germSet:\n _germ_set_score_grasp(germSet, germs_list,\n twirledDerivDaggerDerivList, nonAC_kwargs,\n init_n=1))\n finalScoreFn = (lambda germSet:\n _germ_set_score_grasp(germSet, germs_list,\n twirledDerivDaggerDerivList,\n final_nonAC_kwargs, init_n=1))\n\n #OLD: feasibleThreshold = _scoring.CompositeScore(-numNonGaugeParams,threshold,numNonGaugeParams))\n def _feasible_fn(germ_set): # now that scoring is not ordered entirely by N\n s = _germ_set_score_grasp(germ_set, germs_list,\n twirledDerivDaggerDerivList, nonAC_kwargs,\n init_n=1)\n return (s.N >= numNonGaugeParams and s.minor < threshold)\n\n def rcl_fn(x): return _scoring.filter_composite_rcl(x, alpha)\n\n initialSolns = []\n localSolns = []\n\n for iteration in range(iterations):\n # This loop is parallelizable (each iteration is independent of all\n # other iterations).\n printer.log('Starting iteration {} of {}.'.format(iteration + 1,\n iterations), 1)\n success = False\n failCount = 0\n while not success and failCount < 10:\n try:\n iterSolns = _grasp.run_grasp_iteration(\n elements=germs_list, greedy_score_fn=scoreFn, rcl_fn=rcl_fn,\n local_score_fn=scoreFn,\n get_neighbors_fn=get_neighbors_fn,\n feasible_fn=_feasible_fn,\n initial_elements=initialWeights, seed=seed,\n verbosity=verbosity)\n\n initialSolns.append(iterSolns[0])\n localSolns.append(iterSolns[1])\n\n success = True\n printer.log('Finished iteration {} of {}.'.format(\n iteration + 1, iterations), 1)\n except Exception as e:\n failCount += 1\n raise e if (failCount == 10) else printer.warning(e)\n\n finalScores = _np.array([finalScoreFn(localSoln)\n for localSoln in localSolns])\n bestSoln = localSolns[_np.argmin(finalScores)]\n\n return (bestSoln, initialSolns, localSolns) if return_all else bestSoln\n", "\"\"\"\nMatplotlib volumetric benchmarking plotting routines.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. 
You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport numpy as _np\n\ntry:\n import matplotlib.pyplot as _plt\n from matplotlib.colors import ListedColormap as _ListedColormap\n from matplotlib import cm as _cm\n import seaborn as _sns\n\n _sns.set_style('white')\n _sns.set_style('ticks')\n\n # Utility color maps.\n blues = _sns.color_palette(_sns.color_palette(\"Blues\", 200)).as_hex()\n blues[0] = '#ffffff'\n blues = _ListedColormap(blues)\n\n reds = _sns.color_palette(_sns.color_palette(\"Reds\", 200)).as_hex()\n reds[0] = '#ffffff'\n reds = _ListedColormap(reds)\n\n greens = _sns.color_palette(_sns.color_palette(\"Greens\", 200)).as_hex()\n greens[0] = '#ffffff'\n greens = _ListedColormap(greens)\n\n binary_blue = _sns.color_palette(_sns.color_palette(\"Blues\", 200)).as_hex()\n binary_blue[0] = '#ffffff'\n binary_blue = _ListedColormap([binary_blue[0], binary_blue[50]])\n\n spectral = _cm.get_cmap('Spectral')\n\n # The default color map.\n my_cmap = blues\n\nexcept ImportError:\n _plt = None\n _sns = None\n my_cmap = None\n\n\ndef empty_volumetric_plot(figsize=None, y_values=None, x_values=None, title=None, xlabel='Depth', ylabel='Width'):\n \"\"\"\n Creates an empty volumetric plot with just the axes set.\n\n Parameters\n ----------\n figsize : tuple or None, optional\n The figure size.\n\n y_values : list or None, optional\n The y-axis values, typically corresponding to circuit widths.\n\n x_values : list or None, optional\n The x-axis values, typically corresponding to circuit depths.\n\n title : string or None, optional\n Plot title\n\n xlabel : string, optional\n x-axis label\n\n ylabel : string, optional\n y-axis label.\n\n Return\n ------\n fig, ax : matplolib fig and ax.\n \"\"\"\n if _plt is None or _sns is None:\n raise ValueError((\"While not a core requirement of pyGSTi, Matplotlib and Seaborn are \"\n \"required to generate VB plots. 
It looks like you \"\n \"don't have them installed on your system (it failed to import).\"))\n\n fig, ax = _plt.subplots(figsize=figsize)\n ax.set_aspect('equal')\n _plt.xlabel(xlabel, fontsize=20)\n _plt.ylabel(ylabel, fontsize=20)\n _plt.title(title, fontsize=24, y=1.02)\n _plt.xlim(-1, len(x_values))\n _plt.ylim(-1, len(y_values))\n depth_labels = [str(d)[0:len(str(d)) - ((len(str(d)) - 1) // 3) * 3]\n + ['', 'k', 'M', 'G'][(len(str(d)) - 1) // 3] for d in x_values]\n _plt.xticks(range(len(x_values)), depth_labels, rotation=-60, fontsize=14)\n _plt.yticks(range(len(y_values)), y_values, fontsize=14)\n\n _sns.despine()\n\n return fig, ax\n\n\ndef _get_xy(data, y_values=None, x_values=None):\n # Helper function for setting the x and y axes of VB plots.\n if x_values is None:\n x_values = list(set([shape[0] for shape in data.keys()]))\n x_values.sort()\n if y_values is None:\n y_values = list(set([shape[1] for shape in data.keys()]))\n y_values.sort()\n\n return y_values, x_values\n\n\ndef volumetric_plot(data, y_values=None, x_values=None, title=None, fig=None, ax=None,\n cmap=my_cmap, color=None, flagQV=False, qv_threshold=None,\n figsize=(10, 10), scale=1., centerscale=1., linescale=1.,\n pass_threshold=0, show_threshold=0):\n \"\"\"\n Creates a volumetric benchmarking plot.\n \"\"\"\n y_values, x_values = _get_xy(data, y_values, x_values)\n\n if fig is None:\n fig, ax = empty_volumetric_plot(figsize=figsize, y_values=y_values, x_values=x_values, title=title)\n \n if qv_threshold is None:\n qv_threshold = pass_threshold\n \n if color is not None:\n cmap = None\n point_color = color\n \n for indw, w in enumerate(y_values):\n for indd, d in enumerate(x_values):\n \n edgecolor = 'k'\n linewidth = 1 * linescale\n datapoint = data.get((d, w), None)\n\n if (datapoint is not None) and (not _np.isnan(datapoint)):\n\n if w == d and flagQV:\n if datapoint > qv_threshold:\n edgecolor = 'r'\n linewidth = 5 * scale * linescale\n\n if datapoint >= show_threshold:\n if datapoint < pass_threshold:\n datapoint = 0\n \n if color is None:\n point_color = [datapoint]\n ax.scatter([indd], [indw], marker=\"s\", s=280 * scale - 30 * linewidth, c=point_color,\n cmap=cmap, vmin=0, vmax=1, edgecolor=edgecolor, linewidth=linewidth)\n\n return fig, ax\n\n\ndef volumetric_boundary_plot(data, y_values=None, x_values=None, boundary=None, threshold=.5, \n missing_data_action='continue', monotonic=True, color='k', linewidth=4,\n linestyle='-', dashing=None, fig=None, ax=None, figsize=None, title=None,\n label=None):\n \"\"\"\n Creates a volumetric benchmarking boundary plot, that displays boundary at which the given data \n drops below the specified threshold\n \"\"\"\n y_values, x_values = _get_xy(data, y_values, x_values)\n \n if fig is None:\n fig, ax = empty_volumetric_plot(figsize=figsize, y_values=y_values, x_values=x_values, title=title)\n \n if boundary is not None:\n boundaries = _np.array([-1 if boundary[d] == 0 else y_values.index(boundary[d]) for d in x_values])\n # x-values for a jagged line that outlines the boxes (one pair for each box) \n xvals = [y for x in range(len(x_values)) for y in [x - .5, x + .5]]\n # y-values for a jagged line that outlines the boxes (one pair for each box)\n yvals = [y + .5 for boundary in boundaries for y in [boundary, boundary]]\n\n else:\n # For each depth, find the widest circuit that achieves the threshold performance (return -1 if none)\n if missing_data_action == 'none':\n boundaries = _np.array([_np.max([-1] + [y_values.index(w) for w in y_values if (d, w) in 
data.keys()\n and data[d, w] >= threshold]) for d in x_values])\n # x-values for a jagged line that outlines the boxes (one pair for each box)\n xvals = [y for x in range(len(x_values)) for y in [x - .5, x + .5]]\n # y-values for a jagged line that outlines the boxes (one pair for each box)\n yvals = [y + .5 for boundary in boundaries for y in [boundary, boundary]]\n\n elif missing_data_action == 'continue' or missing_data_action == 'hedge':\n boundaries = []\n d = x_values[0]\n boundary_at_d = _np.max([-1] + [y_values.index(w) for w in y_values if (d, w) in data.keys()\n and data[d, w] >= threshold])\n boundaries.append(boundary_at_d)\n previous_boundary = boundary_at_d\n hedged_x_values = []\n for i, d in enumerate(x_values[1:]):\n max_width_at_depth = _np.max([-1] + [w for w in y_values if (d, w) in data.keys()])\n if max_width_at_depth < previous_boundary:\n boundary_at_d = previous_boundary\n hedged_x_values.append(d)\n else:\n boundary_at_d = _np.max([-1] + [y_values.index(w) for w in y_values if (d, w) in data.keys()\n and data[d, w] >= threshold])\n boundaries.append(boundary_at_d)\n previous_boundary = boundary_at_d\n\n if missing_data_action == 'continue':\n # x-values for a jagged line that outlines the boxes (one pair for each box) \n xvals = [y for x in range(len(x_values)) for y in [x - .5, x + .5]]\n # y-values for a jagged line that outlines the boxes (one pair for each box)\n yvals = [y + .5 for boundary in boundaries for y in [boundary, boundary]]\n \n elif missing_data_action == 'hedge':\n # x-values for a jagged line that outlines the boxes (one pair for each box)\n xvals = []\n yvals = []\n last_xval = -0.5\n for x, boundary in zip(range(len(x_values)), boundaries):\n d = x_values[x]\n if d in hedged_x_values:\n # Only hedge when there's actually some data at larger x_values.\n if not all([d in hedged_x_values for d in x_values[x:]]):\n xvals += [last_xval, x]\n yvals += [boundary + .5, boundary + .5]\n else:\n xvals += [last_xval, x + .5]\n yvals += [boundary + .5, boundary + .5]\n last_xval = xvals[-1]\n\n if monotonic:\n monotonic_yvals = [yvals[0]]\n for y in yvals[1:]:\n if y > monotonic_yvals[-1]:\n monotonic_yvals.append(monotonic_yvals[-1])\n else:\n monotonic_yvals.append(y)\n yvals = monotonic_yvals\n\n line, = ax.plot(xvals, yvals, color, linewidth=linewidth, label=label, linestyle=linestyle)\n if dashing is not None:\n line.set_dashes(dashing)\n return fig, ax\n\n\ndef capability_region_plot(vbdataframe, metric='polarization', threshold=1 / _np.e, significance=0.05, figsize=(10, 10),\n scale=1., title=None, colors=None):\n \"\"\"\n Creates a capability regions plot from a VBDataFrame. Default options creates plots like those shown\n in Fig. 
3 of \"Measuring the Capabilities of Quantum Computers\" arXiv:2008.11294.\n \"\"\"\n x_values = vbdataframe.x_values\n y_values = vbdataframe.y_values\n\n fig, ax = empty_volumetric_plot(figsize=figsize, y_values=y_values, x_values=x_values, title=title)\n\n creg = vbdataframe.capability_regions(metric=metric, threshold=threshold, significance=significance, monotonic=True)\n\n # Split the data up into dicts for the three different regions: 'success', 'indeterminate' and 'fail'.\n creg_split = {}\n creg_split['success'] = {(w, d): 1 for (w, d), val in creg.items() if val == 2}\n creg_split['indeterminate'] = {(w, d): 1 for (w, d), val in creg.items() if val == 1}\n creg_split['fail'] = {(w, d): 1 for (w, d), val in creg.items() if val == 0}\n\n if colors is None:\n colors = {'success': [(0.2, 0.6274509803921569, 0.17254901960784313)],\n 'indeterminate': [(0.9921568627450981, 0.7490196078431373, 0.43529411764705883)],\n 'fail': 'w'}\n\n for region in ('success', 'indeterminate', 'fail'):\n fig, ax = volumetric_plot(creg_split[region], y_values=y_values, x_values=x_values, scale=scale, fig=fig, ax=ax,\n color=colors[region])\n\n return fig, ax\n\n\ndef volumetric_distribution_plot(vbdataframe, metric='polarization', threshold=1 / _np.e, hypothesis_test='standard',\n significance=0.05, figsize=(10, 10), scale={'min': 1.95, 'mean': 1, 'max': 0.13},\n title=None, cmap=my_cmap):\n \"\"\"\n Creates volumetric benchmarking plots that display the maximum, mean and minimum of a given figure-of-merit (by\n default, circuit polarization) as a function of circuit shape. This function can be used to create figures like\n those shown in Fig. 1 of \"Measuring the Capabilities of Quantum Computers\" arXiv:2008.11294.\n\n Parameters\n ----------\n vbdataframe : VBDataFrame\n A VBDataFrame object containing the data to be plotted in a VB plot.\n\n metric : string, optional\n The quantity to plot. Default is 'polarization' as used and defined in arXiv:2008.11294. The plot\n will show the maximum, mean, and minimum of this metric at each circuit shape.\n\n threshold : float, optional\n The threshold for \"success\" for the figure-of-merit defined by `metric`. This threshold is used\n to compute the three \"success\" boundaries that are shown in the plot.\n\n hypothesis_test : string, optional\n The type of statistical significance adjustment to apply to the boundaries. The options are\n - 'standard': this reproduces the method used and described in arXiv:2008.11294 (see the \n appendices for details). With this option, there will be a difference between the \n boundary for the minimum and maximum polarization only if there is statistically significant\n evidence in the data for this.\n - 'none': no statistical significance adjustment: all three boundaries show the point at which\n relevant statistic (maximum, mean, minimum) drops below the threshold.\n\n significance : float, optional\n The statistical significance in the hypothesis tests. 
Only used in `hypothesis_test` is not 'none'.\n\n figsize : tuple, optional\n The figure size\n\n scale : dict, optional\n The scale for the three concentric squares, showing the maximum, mean and minimum.\n\n title : sting, optional\n The figure title.\n\n cmap : ColorMap, optional\n A matplotlib colormap.\n\n Return\n ------\n fig, ax : matplolib fig and ax.\n \"\"\"\n linescale = {'min': 1, 'mean': 0, 'max': 0}\n boundary_color = {'min': '#ff0000', 'mean': '#000000', 'max': '#2ecc71'}\n boundary_dashing = {'min': [1, 1], 'mean': None, 'max': [0.5, 0.5]}\n boundary_linewidth = {'min': 3, 'mean': 6, 'max': 5}\n x_values = vbdataframe.x_values\n y_values = vbdataframe.y_values\n\n fig, ax = empty_volumetric_plot(figsize=figsize, y_values=y_values, x_values=x_values, title=title)\n\n # Dictionary containing the three types of VB data that are used in this plot.\n vb_data = {stat: vbdataframe.vb_data(metric=metric, statistic=stat, no_data_action='discard')\n for stat in ('min', 'mean', 'max')}\n # Used to find the min and max boundaries if they are adjusted for statistical significance.\n capability_regions = vbdataframe.capability_regions(metric=metric, threshold=threshold, significance=significance,\n monotonic=True)\n\n if hypothesis_test == 'standard':\n adjusted_boundaries = ('max', 'min')\n unadjusted_boundaries = ('mean',)\n\n elif hypothesis_test == 'none':\n adjusted_boundaries = ()\n unadjusted_boundaries = ('max', 'mean', 'min',)\n\n else:\n raise ValueError(\"`hypothesis_test` must be 'standard' or 'none'!\")\n\n # Plots the data.\n for statistic in ('min', 'mean', 'max'): \n fig, ax = volumetric_plot(vb_data[statistic], y_values=y_values, x_values=x_values, fig=fig, ax=ax,\n scale=scale[statistic], linescale=linescale[statistic], cmap=cmap)\n \n # Plots the boundaries that have been adjusted for statistical significance.\n for statistic in adjusted_boundaries: \n if statistic == 'max': effective_threshold = 0.99\n elif statistic == 'min': effective_threshold = 1.99\n volumetric_boundary_plot(capability_regions, y_values=y_values, x_values=x_values, threshold=effective_threshold,\n missing_data_action='hedge', fig=fig, ax=ax, linestyle='-',\n color=boundary_color[statistic], linewidth=boundary_linewidth[statistic],\n dashing=boundary_dashing[statistic]) \n \n # Plots the boundaries that are not adjusted for statistical significance.\n for statistic in unadjusted_boundaries:\n volumetric_boundary_plot(vb_data[statistic], y_values=y_values, x_values=x_values, threshold=threshold,\n monotonic=False, missing_data_action='hedge', fig=fig, ax=ax, linestyle='-', \n color=boundary_color[statistic], linewidth=boundary_linewidth[statistic], \n dashing=boundary_dashing[statistic])\n\n return fig, ax\n", "import logging\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\n\nimport unittest\nimport pygsti\nimport numpy as np\nfrom pygsti.modelpacks.legacy import std1Q_XYI\n\nfrom ..testutils import BaseTestCase\n\n\nclass MyTimeDependentIdle(pygsti.modelmembers.operations.DenseOperator):\n \"\"\"And idle that depolarizes over time with a parameterized rate\"\"\"\n def __init__(self, initial_depol_rate):\n #initialize with no noise\n self.need_time = True # maybe torep() won't work unless this is False?\n super(MyTimeDependentIdle,self).__init__(np.identity(4,'d'), \"densitymx\") # this is *super*-operator, so \"densitymx\"\n self.from_vector([initial_depol_rate])\n self.set_time(0.0)\n\n @property\n def num_params(self):\n return 1 # we have two parameters\n\n 
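# --- Illustrative aside (not part of the original test file) ----------------------
# A minimal, self-contained sketch of the superoperator that the set_time() method
# below constructs: a depolarizing idle in the (normalized) Pauli basis whose
# non-identity diagonal decays linearly in time and is clipped at full depolarization.
# The helper name `depol_ptm` is hypothetical and exists only for illustration.
import numpy as np

def depol_ptm(depol_rate, t):
    """Pauli-transfer matrix of the assumed time-dependent depolarizing idle."""
    a = 1.0 - min(depol_rate * t, 1.0)  # same clipping rule used in set_time below
    return np.diag([1.0, a, a, a])      # leaves the I component alone, shrinks X, Y, Z by a

# For depol_rate = 1.0 (the value used in these tests) this gives a = 0.9 at t = 0.1
# and a = 0.8 at t = 0.2; with P(0) = (1 + a)/2 for a |0> preparation, that is the
# 95/5 and 90/10 count split asserted in test_time_dependent_datagen below.
# -----------------------------------------------------------------------------------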
def to_vector(self):\n return np.array([self.depol_rate],'d') #our parameter vector\n\n def from_vector(self, v, close=False, dirty_value=True):\n #initialize from parameter vector v\n self.depol_rate = v[0]\n self.need_time = True\n self.dirty = dirty_value\n\n def set_time(self,t):\n a = 1.0-min(self.depol_rate*t,1.0)\n self.need_time = False\n\n # .base is a member of DenseOperator and is a numpy array that is\n # the dense Pauli transfer matrix of this operator\n self._ptr[:,:] = np.array([[1, 0, 0, 0],\n [0, a, 0, 0],\n [0, 0, a, 0],\n [0, 0, 0, a]],'d')\n self._ptr_has_changed()\n\n def transform(self, S):\n # Update self with inverse(S) * self * S (used in gauge optimization)\n raise NotImplementedError(\"MyTimeDependentIdle cannot be transformed!\")\n\n\nclass TimeDependentTestCase(BaseTestCase):\n\n def setUp(self):\n super(TimeDependentTestCase, self).setUp()\n\n def test_time_dependent_datagen(self):\n mdl = std1Q_XYI.target_model(\"full TP\",sim_type=\"map\")\n mdl.operations['Gi'] = MyTimeDependentIdle(1.0)\n\n #Create a time-dependent dataset (simulation of time-dependent model):\n circuits = std1Q_XYI.prepStrs + pygsti.circuits.to_circuits([('Gi',), ('Gi', 'Gx', 'Gi', 'Gx')]) # just pick some circuits\n ds = pygsti.data.simulate_data(mdl, circuits, num_samples=100,\n sample_error='none', seed=1234, times=[0,0.1,0.2])\n\n self.assertArraysEqual(ds[('Gi',)].time, np.array([0., 0., 0.1, 0.1, 0.2, 0.2]))\n self.assertArraysEqual(ds[('Gi',)].reps, np.array([100., 0., 95., 5., 90., 10.]))\n self.assertArraysEqual(ds[('Gi',)].outcomes, [('0',), ('1',), ('0',), ('1',), ('0',), ('1',)])\n\n # sparse data\n ds2 = pygsti.data.simulate_data(mdl, circuits, num_samples=100,\n sample_error='none', seed=1234, times=[0,0.1,0.2],\n record_zero_counts=False)\n self.assertArraysEqual(ds2[('Gi',)].time, np.array([0., 0.1, 0.1, 0.2, 0.2]))\n self.assertArraysEqual(ds2[('Gi',)].reps, np.array([100., 95., 5., 90., 10.]))\n self.assertArraysEqual(ds2[('Gi',)].outcomes, [('0',), ('0',), ('1',), ('0',), ('1',)])\n\n def test_time_dependent_gst_staticdata(self):\n\n #run GST in a time-dependent mode:\n prep_fiducials, meas_fiducials = std1Q_XYI.prepStrs, std1Q_XYI.effectStrs\n germs = std1Q_XYI.germs\n maxLengths = [1, 2]\n\n target_model = std1Q_XYI.target_model(\"full TP\", sim_type=\"map\")\n mdl_datagen = target_model.depolarize(op_noise=0.01, spam_noise=0.001)\n edesign = pygsti.protocols.StandardGSTDesign(target_model.create_processor_spec(), prep_fiducials,\n meas_fiducials, germs, maxLengths)\n\n # *sparse*, time-independent data\n ds = pygsti.data.simulate_data(mdl_datagen, edesign.all_circuits_needing_data, num_samples=10,\n sample_error=\"binomial\", seed=1234, times=[0],\n record_zero_counts=False)\n data = pygsti.protocols.ProtocolData(edesign, ds)\n\n target_model.sim = pygsti.forwardsims.MapForwardSimulator(max_cache_size=0) # No caching allowed for time-dependent calcs\n self.assertEqual(ds.degrees_of_freedom(aggregate_times=False), 126)\n\n builders = pygsti.protocols.GSTObjFnBuilders([pygsti.objectivefns.TimeDependentPoissonPicLogLFunction.builder()], [])\n gst = pygsti.protocols.GateSetTomography(target_model, gaugeopt_suite=None,\n objfn_builders=builders)\n results = gst.run(data)\n\n # Normal GST used as a check - should get same answer since data is time-independent\n results2 = pygsti.run_long_sequence_gst(ds, target_model, prep_fiducials, meas_fiducials,\n germs, maxLengths, verbosity=3,\n advanced_options={'starting_point': 'target',\n 'always_perform_mle': True,\n 
'only_perform_mle': True}, gauge_opt_params=False)\n\n #These check FAIL on some TravisCI machines for an unknown reason (but passes on Eriks machines) -- figure out why this is in FUTURE.\n #Check that \"timeDependent=True\" mode matches behavior or \"timeDependent=False\" mode when model and data are time-independent.\n #self.assertAlmostEqual(pygsti.tools.chi2(results.estimates['default'].models['iteration estimates'][0], results.dataset, results.circuit_lists['iteration'][0]),\n # pygsti.tools.chi2(results2.estimates['default'].models['iteration estimates'][0], results2.dataset, results2.circuit_lists['iteration'][0]),\n # places=0)\n #self.assertAlmostEqual(pygsti.tools.chi2(results.estimates['default'].models['iteration estimates'][1], results.dataset, results.circuit_lists['iteration'][1]),\n # pygsti.tools.chi2(results2.estimates['default'].models['iteration estimates'][1], results2.dataset, results2.circuit_lists['iteration'][1]),\n # places=0)\n #self.assertAlmostEqual(pygsti.tools.two_delta_logl(results.estimates['default'].models['final iteration estimate'], results.dataset),\n # pygsti.tools.two_delta_logl(results2.estimates['default'].models['final iteration estimate'], results2.dataset),\n # places=0)\n\n def test_time_dependent_gst(self):\n #run GST in a time-dependent mode:\n prep_fiducials, meas_fiducials = std1Q_XYI.prepStrs, std1Q_XYI.effectStrs\n germs = std1Q_XYI.germs\n maxLengths = [1, 2]\n\n target_model = std1Q_XYI.target_model(\"full TP\",sim_type=\"map\")\n mdl_datagen = target_model.depolarize(op_noise=0.01, spam_noise=0.001)\n mdl_datagen.operations['Gi'] = MyTimeDependentIdle(1.0)\n edesign = pygsti.protocols.StandardGSTDesign(target_model.create_processor_spec(), prep_fiducials,\n meas_fiducials, germs, maxLengths)\n\n # *sparse*, time-independent data\n ds = pygsti.data.simulate_data(mdl_datagen, edesign.all_circuits_needing_data, num_samples=1000,\n sample_error=\"binomial\", seed=1234, times=[0, 0.1, 0.2],\n record_zero_counts=False)\n self.assertEqual(ds.degrees_of_freedom(aggregate_times=False), 500)\n\n target_model.operations['Gi'] = MyTimeDependentIdle(0.0) # start assuming no time dependent decay 0\n target_model.sim = pygsti.forwardsims.MapForwardSimulator(max_cache_size=0) # No caching allowed for time-dependent calcs\n\n builders = pygsti.protocols.GSTObjFnBuilders([pygsti.objectivefns.TimeDependentPoissonPicLogLFunction.builder()], [])\n gst = pygsti.protocols.GateSetTomography(target_model, gaugeopt_suite=None,\n objfn_builders=builders, optimizer={'tol': 1e-4})\n data = pygsti.protocols.ProtocolData(edesign, ds)\n results = gst.run(data)\n\n #we should recover the 1.0 decay we put into mdl_datagen['Gi']:\n final_mdl = results.estimates['GateSetTomography'].models['final iteration estimate']\n print(\"Final decay rate = \", final_mdl.operations['Gi'].to_vector())\n #self.assertAlmostEqual(final_mdl.operations['Gi'].to_vector()[0], 1.0, places=1)\n self.assertAlmostEqual(final_mdl.operations['Gi'].to_vector()[0], 1.0, delta=0.1) # weaker b/c of unknown TravisCI issues\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n", "\"\"\"\nDefines the MatrixCOPALayout class.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 
Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport collections as _collections\n\nimport numpy as _np\n\nfrom pygsti.layouts.distlayout import DistributableCOPALayout as _DistributableCOPALayout\nfrom pygsti.layouts.distlayout import _DistributableAtom\nfrom pygsti.layouts.evaltree import EvalTree as _EvalTree\nfrom pygsti.circuits.circuitlist import CircuitList as _CircuitList\nfrom pygsti.tools import listtools as _lt\nfrom pygsti.tools import slicetools as _slct\n\n\nclass _MatrixCOPALayoutAtom(_DistributableAtom):\n \"\"\"\n The atom (\"atomic unit\") for dividing up the element dimension in a :class:`MatrixCOPALayout`.\n\n Parameters\n ----------\n unique_complete_circuits : list\n A list that contains *all* the \"complete\" circuits for the parent layout. This\n atom only owns a subset of these, as given by `group` below.\n\n unique_nospam_circuits : list\n A list that contains the unique circuits within `unique_complete_circuits` once\n their state preparations and measurements are removed. A subset of these circuits\n (see `group` below) are what fundamentally define the circuit outcomes that this atom\n includes: it includes *all* the circuit outcomes of those circuits.\n\n circuits_by_unique_nospam_circuits : dict\n A dictionary with keys equal to the elements of `unique_nospam_circuits` and values\n that are lists of indices into `unique_complete_circuits`. Thus, this dictionary\n maps each distinct circuit-without-SPAM circuit to the list of complete circuits\n within `unique_complete_circuits` that correspond to it.\n\n ds_circuits : list\n A list of circuits parallel to `unique_complete_circuits` of these circuits\n as they should be accessed from `dataset`. This applies any aliases and\n removes implied SPAM elements relative to `unique_complete_circuits`.\n\n group : set\n The set of indices into `unique_nospam_circuits` that define the circuit\n outcomes owned by this atom.\n\n helpful_scratch : set\n A set of indices into `unique_nospam_circuits` that specify circuits that\n aren't owned by this atom but are helpful in building up an efficient evaluation\n tree.\n\n model : Model\n The model being used to construct this layout. 
Used for expanding instruments\n within the circuits.\n\n dataset : DataSet\n The dataset, used to include only observed circuit outcomes in this atom\n and therefore the parent layout.\n \"\"\"\n\n def __init__(self, unique_complete_circuits, unique_nospam_circuits, circuits_by_unique_nospam_circuits,\n ds_circuits, group, helpful_scratch, model, dataset):\n\n #Note: group gives unique_nospam_circuits indices, which circuits_by_unique_nospam_circuits\n # turns into \"unique complete circuit\" indices, which the layout via it's to_unique can map\n # to original circuit indices.\n def add_expanded_circuits(indices, add_to_this_dict):\n _expanded_nospam_circuit_outcomes = add_to_this_dict\n for i in indices:\n nospam_c = unique_nospam_circuits[i]\n for unique_i in circuits_by_unique_nospam_circuits[nospam_c]: # \"unique\" circuits: add SPAM to nospam_c\n observed_outcomes = None if (dataset is None) else dataset[ds_circuits[unique_i]].unique_outcomes\n expc_outcomes = unique_complete_circuits[unique_i].expand_instruments_and_separate_povm(\n model, observed_outcomes)\n #Note: unique_complete_circuits may have duplicates (they're only unique *pre*-completion)\n\n for sep_povm_c, outcomes in expc_outcomes.items(): # for each expanded cir from unique_i-th circuit\n prep_lbl = sep_povm_c.circuit_without_povm[0]\n exp_nospam_c = sep_povm_c.circuit_without_povm[1:] # sep_povm_c *always* has prep lbl\n spam_tuples = [(prep_lbl, elabel) for elabel in sep_povm_c.full_effect_labels]\n outcome_by_spamtuple = _collections.OrderedDict([(st, outcome)\n for st, outcome in zip(spam_tuples, outcomes)])\n\n #Now add these outcomes to `expanded_nospam_circuit_outcomes` - note that multiple \"unique_i\"'s\n # may exist for the same expanded & without-spam circuit (exp_nospam_c) and so we need to\n # keep track of a list if unique_i indices for each circut and spam tuple below.\n if exp_nospam_c not in _expanded_nospam_circuit_outcomes:\n _expanded_nospam_circuit_outcomes[exp_nospam_c] = _collections.OrderedDict(\n [(st, (outcome, [unique_i])) for st, outcome in zip(spam_tuples, outcomes)])\n else:\n for st, outcome in outcome_by_spamtuple.items():\n if st in _expanded_nospam_circuit_outcomes[exp_nospam_c]:\n existing_outcome, existing_unique_is = \\\n _expanded_nospam_circuit_outcomes[exp_nospam_c][st]\n assert(existing_outcome == outcome), \"Outcome should be same when spam tuples are!\"\n assert(unique_i not in existing_unique_is) # SLOW - remove?\n existing_unique_is.append(unique_i)\n else:\n _expanded_nospam_circuit_outcomes[exp_nospam_c][st] = (outcome, [unique_i])\n\n # keys = expanded circuits w/out SPAM layers; values = spamtuple => (outcome, unique_is) dictionary that\n # keeps track of which \"unique\" circuit indices having each spamtuple / outcome.\n expanded_nospam_circuit_outcomes = _collections.OrderedDict()\n add_expanded_circuits(group, expanded_nospam_circuit_outcomes)\n expanded_nospam_circuits = _collections.OrderedDict(\n [(i, cir) for i, cir in enumerate(expanded_nospam_circuit_outcomes.keys())])\n\n # add suggested scratch to the \"final\" elements as far as the tree creation is concerned\n # - this allows these scratch element to help balance the tree.\n expanded_nospam_circuit_outcomes_plus_scratch = expanded_nospam_circuit_outcomes.copy()\n add_expanded_circuits(helpful_scratch, expanded_nospam_circuit_outcomes_plus_scratch)\n expanded_nospam_circuits_plus_scratch = _collections.OrderedDict(\n [(i, cir) for i, cir in 
enumerate(expanded_nospam_circuit_outcomes_plus_scratch.keys())])\n\n double_expanded_nospam_circuits_plus_scratch = _collections.OrderedDict()\n for i, cir in expanded_nospam_circuits_plus_scratch.items():\n cir = cir.copy(editable=True)\n cir.expand_subcircuits() # expand sub-circuits for a more efficient tree\n cir.done_editing()\n double_expanded_nospam_circuits_plus_scratch[i] = cir\n\n self.tree = _EvalTree.create(double_expanded_nospam_circuits_plus_scratch)\n #print(\"Atom tree: %d circuits => tree of size %d\" % (len(expanded_nospam_circuits), len(self.tree)))\n\n self._num_nonscratch_tree_items = len(expanded_nospam_circuits) # put this in EvalTree?\n\n # self.tree's elements give instructions for evaluating (\"caching\") no-spam quantities (e.g. products).\n # Now we assign final element indices to the circuit outcomes corresponding to a given no-spam (\"tree\")\n # quantity plus a spam-tuple. We order the final indices so that all the outcomes corresponding to a\n # given spam-tuple are contiguous.\n\n tree_indices_by_spamtuple = _collections.OrderedDict() # \"tree\" indices index expanded_nospam_circuits\n for i, c in expanded_nospam_circuits.items():\n for spam_tuple in expanded_nospam_circuit_outcomes[c].keys():\n if spam_tuple not in tree_indices_by_spamtuple: tree_indices_by_spamtuple[spam_tuple] = []\n tree_indices_by_spamtuple[spam_tuple].append(i)\n\n #Assign element indices, starting at `offset`\n # now that we know how many of each spamtuple there are, assign final element indices.\n local_offset = 0\n self.indices_by_spamtuple = _collections.OrderedDict() # values are (element_indices, tree_indices) tuples.\n for spam_tuple, tree_indices in tree_indices_by_spamtuple.items():\n self.indices_by_spamtuple[spam_tuple] = (slice(local_offset, local_offset + len(tree_indices)),\n _slct.list_to_slice(tree_indices, array_ok=True))\n local_offset += len(tree_indices)\n #TODO: allow tree_indices to be None or a slice?\n\n element_slice = None # slice(offset, offset + local_offset) # *global* (of parent layout) element-index slice\n num_elements = local_offset\n\n elindex_outcome_tuples = _collections.OrderedDict([\n (unique_i, list()) for unique_i in range(len(unique_complete_circuits))])\n\n for spam_tuple, (element_indices, tree_indices) in self.indices_by_spamtuple.items():\n for elindex, tree_index in zip(_slct.indices(element_indices), _slct.to_array(tree_indices)):\n outcome_by_spamtuple = expanded_nospam_circuit_outcomes[expanded_nospam_circuits[tree_index]]\n outcome, unique_is = outcome_by_spamtuple[spam_tuple]\n for unique_i in unique_is:\n elindex_outcome_tuples[unique_i].append((elindex, outcome)) # *local* element indices\n self.elindex_outcome_tuples = elindex_outcome_tuples\n\n super().__init__(element_slice, num_elements)\n\n def nonscratch_cache_view(self, a, axis=None):\n \"\"\"\n Create a view of array `a` restricting it to only the *final* results computed by this tree.\n\n This need not be the entire array because there could be intermediate results\n (e.g. \"scratch space\") that are excluded.\n\n Parameters\n ----------\n a : ndarray\n An array of results computed using this EvalTree,\n such that the `axis`-th dimension equals the full\n length of the tree. The other dimensions of `a` are\n unrestricted.\n\n axis : int, optional\n Specified the axis along which the selection of the\n final elements is performed. 
If None, than this\n selection if performed on flattened `a`.\n\n Returns\n -------\n ndarray\n Of the same shape as `a`, except for along the\n specified axis, whose dimension has been reduced\n to filter out the intermediate (non-final) results.\n \"\"\"\n if axis is None:\n return a[0:self._num_nonscratch_tree_items]\n else:\n sl = [slice(None)] * a.ndim\n sl[axis] = slice(0, self._num_nonscratch_tree_items)\n ret = a[tuple(sl)]\n assert(ret.base is a or ret.base is a.base) # check that what is returned is a view\n assert(ret.size == 0 or _np.may_share_memory(ret, a))\n return ret\n\n @property\n def cache_size(self):\n \"\"\"The cache size of this atom.\"\"\"\n return len(self.tree)\n\n\nclass MatrixCOPALayout(_DistributableCOPALayout):\n \"\"\"\n A circuit outcome probability array (COPA) layout for circuit simulation by process matrix multiplication.\n\n A distributed layout that divides a list of circuits into several \"evaluation trees\"\n that compute subsets of the circuit outcomes by multiplying together process matrices.\n Often these evaluation trees correspond to available processors, but it can be useful\n to divide computations in order to lessen the amount of intermediate memory required.\n\n MatrixCOPALayout instances create and store the decomposition of a list of circuits into\n a sequence of 2-term products of smaller strings. Ideally, this sequence would\n prescribe the way to obtain the entire list of circuits, starting with just the single\n gates, using the fewest number of multiplications, but this optimality is not\n guaranteed.\n\n Parameters\n ----------\n circuits : list\n A list of:class:`Circuit` objects representing the circuits this layout will include.\n\n model : Model\n The model that will be used to compute circuit outcome probabilities using this layout.\n This model is used to complete and expand the circuits in `circuits`.\n\n dataset : DataSet, optional\n If not None, restrict the circuit outcomes stored by this layout to only the\n outcomes observed in this data set.\n\n num_sub_trees : int, optional\n The number of groups (\"sub-trees\") to divide the circuits into. This is the\n number of *atoms* for this layout.\n\n num_tree_processors : int, optional\n The number of atom-processors, i.e. groups of processors that process sub-trees.\n\n num_param_dimension_processors : tuple, optional\n A 1- or 2-tuple of integers specifying how many parameter-block processors are\n used when dividing the physical processors into a grid. The first and second\n elements correspond to counts for the first and second parameter dimensions,\n respecively.\n\n param_dimensions : tuple, optional\n The number of parameters along each parameter dimension. Can be an\n empty, 1-, or 2-tuple of integers which dictates how many parameter dimensions this\n layout supports.\n\n param_dimension_blk_sizes : tuple, optional\n The parameter block sizes along each present parameter dimension, so this should\n be the same shape as `param_dimensions`. A block size of `None` means that there\n should be no division into blocks, and that each block processor computes all of\n its parameter indices at once.\n\n resource_alloc : ResourceAllocation, optional\n The resources available for computing circuit outcome probabilities.\n\n verbosity : int or VerbosityPrinter\n Determines how much output to send to stdout. 
0 means no output, higher\n integers mean more output.\n \"\"\"\n\n def __init__(self, circuits, model, dataset=None, num_sub_trees=None, num_tree_processors=1,\n num_param_dimension_processors=(), param_dimensions=(),\n param_dimension_blk_sizes=(), resource_alloc=None, verbosity=0):\n\n #OUTDATED: TODO - revise this:\n # 1. pre-process => get complete circuits => spam-tuples list for each no-spam circuit (no expanding yet)\n # 2. decide how to divide no-spam circuits into groups corresponding to sub-strategies\n # - create tree of no-spam circuits (may contain instruments, etc, just not SPAM)\n # - heuristically find groups of circuits that meet criteria\n # 3. separately create a tree of no-spam expanded circuits originating from each group => self.atoms\n # 4. assign \"cache\" and element indices so that a) all elements of a tree are contiguous\n # and b) elements with the same spam-tuple are continguous.\n # 5. initialize base class with given per-original-circuit element indices.\n\n unique_circuits, to_unique = self._compute_unique_circuits(circuits)\n aliases = circuits.op_label_aliases if isinstance(circuits, _CircuitList) else None\n ds_circuits = _lt.apply_aliases_to_circuits(unique_circuits, aliases)\n unique_complete_circuits = [model.complete_circuit(c) for c in unique_circuits]\n #Note: \"unique\" means a unique circuit *before* circuit-completion, so there could be duplicate\n # \"unique circuits\" after completion, e.g. \"rho0Gx\" and \"Gx\" could both complete to \"rho0GxMdefault_0\".\n\n circuits_by_unique_nospam_circuits = _collections.OrderedDict()\n for i, c in enumerate(unique_complete_circuits):\n _, nospam_c, _ = model.split_circuit(c)\n if nospam_c in circuits_by_unique_nospam_circuits:\n circuits_by_unique_nospam_circuits[nospam_c].append(i)\n else:\n circuits_by_unique_nospam_circuits[nospam_c] = [i]\n unique_nospam_circuits = list(circuits_by_unique_nospam_circuits.keys())\n\n # Split circuits into groups that will make good subtrees (all procs do this)\n max_sub_tree_size = None # removed from being an argument (unused)\n if (num_sub_trees is not None and num_sub_trees > 1) or max_sub_tree_size is not None:\n circuit_tree = _EvalTree.create(unique_nospam_circuits)\n groups, helpful_scratch = circuit_tree.find_splitting(len(unique_nospam_circuits),\n max_sub_tree_size, num_sub_trees, verbosity - 1)\n #print(\"%d circuits => tree of size %d\" % (len(unique_nospam_circuits), len(circuit_tree)))\n else:\n groups = [set(range(len(unique_nospam_circuits)))]\n helpful_scratch = [set()]\n # (elements of `groups` contain indices into `unique_nospam_circuits`)\n\n # Divide `groups` into num_tree_processors roughly equal sets (each containing\n # potentially multiple groups)\n #my_group_indices, group_owners, grp_subcomm = self._distribute(num_tree_processors, len(groups),\n # resource_alloc, verbosity)\n #my_group_indices = set(my_group_indices)\n\n #my_atoms = []\n #elindex_outcome_tuples = _collections.OrderedDict([\n # (orig_i, list()) for orig_i in range(len(unique_circuits))])\n #\n #offset = 0\n #for i, (group, helpful_scratch_group) in enumerate(zip(groups, helpful_scratch)):\n # if i not in my_group_indices: continue\n # my_atoms.append(_MatrixCOPALayoutAtom(unique_complete_circuits, unique_nospam_circuits,\n # circuits_by_unique_nospam_circuits, ds_circuits,\n # group, helpful_scratch_group, model, dataset, offset,\n # elindex_outcome_tuples))\n # offset += my_atoms[-1].num_elements\n\n def _create_atom(args):\n group, helpful_scratch_group = args\n return 
_MatrixCOPALayoutAtom(unique_complete_circuits, unique_nospam_circuits,\n circuits_by_unique_nospam_circuits, ds_circuits,\n group, helpful_scratch_group, model, dataset)\n\n super().__init__(circuits, unique_circuits, to_unique, unique_complete_circuits,\n _create_atom, list(zip(groups, helpful_scratch)), num_tree_processors,\n num_param_dimension_processors, param_dimensions,\n param_dimension_blk_sizes, resource_alloc, verbosity)\n", "\"\"\"\nColormap and derived class definitions\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport numpy as _np\nfrom scipy.stats import chi2 as _chi2\n\nfrom pygsti.baseobjs.smartcache import smart_cached\n\n\n@smart_cached\ndef _vnorm(x, vmin, vmax):\n #Perform linear mapping from [vmin,vmax] to [0,1]\n # (which is just a *part* of the full mapping performed)\n if _np.isclose(vmin, vmax): return _np.ma.zeros(x.shape, 'd')\n return _np.clip((x - vmin) / (vmax - vmin), 0.0, 1.0)\n\n\n@smart_cached\ndef to_rgb_array(color_str):\n \"\"\"\n Convert a color string, such as `\"rgb(0,255,128)\"` or `\"#00FF88\"` to a numpy array of length 3.\n\n Parameters\n ----------\n color_str : str\n A color string, e.g. `\"rgb(0,255,128)\"` or `\"#00FF88\"`.\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n color_str = color_str.strip() # remove any whitespace\n if color_str.startswith('#') and len(color_str) >= 7:\n r, g, b = color_str[1:3], color_str[3:5], color_str[5:7]\n r = float(int(r, 16))\n g = float(int(g, 16))\n b = float(int(b, 16))\n rgb = r, g, b\n elif color_str.startswith('rgb(') and color_str.endswith(')'):\n tupstr = color_str[len('rgb('):-1]\n rgb = [float(x) for x in tupstr.split(',')]\n elif color_str.startswith('rgba(') and color_str.endswith(')'):\n tupstr = color_str[len('rgba('):-1]\n rgb = [float(x) for x in tupstr.split(',')[0:3]] # ignore alpha\n else:\n raise ValueError(\"Cannot convert color_str = \", color_str)\n return _np.array(rgb)\n\n\ndef interpolate_plotly_colorscale(plotly_colorscale, normalized_value):\n \"\"\"\n Evaluates plotly colorscale at a particular value.\n\n This function linearly interpolates between the colors of a\n Plotly colorscale.\n\n Parameters\n ----------\n plotly_colorscale : list\n A Plotly colorscale (list of `[val, color]`) elements where\n `val` is a float between 0 and 1, and `color` is any acceptable\n Plotly color value (e.g. 
`rgb(0,100,255)`, `#0033FF`, etc.).\n\n normalized_value : float\n The value (between 0 and 1) to compute the color for.\n\n Returns\n -------\n str\n A string representation of the plotly color of the form `\"rgb(R,G,B)\"`.\n \"\"\"\n for i, (val, color) in enumerate(plotly_colorscale[:-1]):\n next_val, next_color = plotly_colorscale[i + 1]\n if val <= normalized_value < next_val:\n rgb = to_rgb_array(color)\n next_rgb = to_rgb_array(next_color)\n v = (normalized_value - val) / (next_val - val)\n interp_rgb = (1.0 - v) * rgb + v * next_rgb\n break\n else:\n val, color = plotly_colorscale[-1]\n assert(val <= normalized_value)\n interp_rgb = to_rgb_array(color)\n return 'rgb(%d,%d,%d)' % (int(round(interp_rgb[0])),\n int(round(interp_rgb[1])),\n int(round(interp_rgb[2])))\n\n\nclass Colormap(object):\n \"\"\"\n A color map which encapsulates a plotly colorscale with a normalization.\n\n This class also contains additional functionality such as the ability to\n compute the color corresponding to a particular value and extract matplotlib\n colormap and normalization objects.\n\n Parameters\n ----------\n rgb_colors : list\n A list of `[val, (R,G,B)]` elements where `val` is a floating point\n number between 0 and 1 (plotly maps the post-'normalized' data linearly\n onto the interval [0,1] before mapping to a color), and `R`,`G`,and `B`\n are red, green, and blue floating point values in [0,1]. The color will\n be interpolated between the different \"point\" elements in this list.\n\n hmin : float\n The minimum post-normalized values to be used for the heatmap.\n That is, `hmin` is the value (after `normalize` has been\n called) assigned the \"0.0\"-valued color in `rgb_colors`.\n\n hmax : float\n The maximum post-normalized values to be used for the heatmap.\n That is, `hmax` is the value (after `normalize` has been\n called) assigned the \"1.0\"-valued color in `rgb_colors`.\n\n invalid_color : tuple, optional\n If not None, an (R,G,B) tuple of values in [0,1] specifying the\n color to use for *normalized* values (which usually should be\n in [0,1]) that lie outside the [0,1] range of `rgb_colors`.\n \"\"\"\n\n def __init__(self, rgb_colors, hmin, hmax, invalid_color=None):\n \"\"\"\n Create a new Colormap.\n\n Parameters\n ----------\n rgb_colors : list\n A list of `[val, (R,G,B)]` elements where `val` is a floating point\n number between 0 and 1 (plotly maps the post-'normalized' data linearly\n onto the interval [0,1] before mapping to a color), and `R`,`G`,and `B`\n are red, green, and blue floating point values in [0,1]. The color will\n be interpolated between the different \"point\" elements in this list.\n\n hmin, hmax : float\n The minimum and maximum post-normalized values to be used for the\n heatmap. 
That is, `hmin` is the value (after `normalize` has been\n called) assigned the \"0.0\"-valued color in `rgb_colors` and `hmax`\n similarly for the \"1.0\"-valued color.\n\n invalid_color : tuple, optional\n If not None, an (R,G,B) tuple of values in [0,1] specifying the\n color to use for *normalized* values (which usually should be\n in [0,1]) that lie outside the [0,1] range of `rgb_colors`.\n \"\"\"\n\n self.rgb_colors = rgb_colors\n self.invalid_color = invalid_color\n self.hmin = hmin\n self.hmax = hmax\n\n def _brightness(self, r, g, b):\n # Perceived brightness calculation from http://alienryderflex.com/hsp.html\n return _np.sqrt(0.299 * r**2 + 0.587 * g**2 + 0.114 * b**2)\n\n def normalize(self, value):\n \"\"\"\n Normalize value as it would be prior to linearly interpolating onto the [0,1] range of the color map.\n\n In this case, no additional normalization is performed, so this\n function just returns `value`.\n\n Parameters\n ----------\n value : float or numpy.ndarray\n The value to normalize.\n\n Returns\n -------\n float or numpy.ndarray\n \"\"\"\n #Default behavior for derived classes: no \"normalization\" is done\n # here because plotly automatically maps (linearly) the interval\n # between a heatmap's zmin and zmax to [0,1].\n return value\n\n def besttxtcolor(self, value):\n \"\"\"\n Return the better text color, \"black\" or \"white\", given an un-normalized `value`.\n\n Parameters\n ----------\n value : float\n An un-normalized value.\n\n Returns\n -------\n str\n \"\"\"\n z = _vnorm(self.normalize(value), self.hmin, self.hmax) # norm_value <=> color\n for i in range(1, len(self.rgb_colors)):\n if z < self.rgb_colors[i][0]:\n z1, rgb1 = self.rgb_colors[i - 1]\n z2, rgb2 = self.rgb_colors[i]\n alpha = (z - z1) / (z2 - z1)\n R, G, B = [rgb1[i] + alpha * (rgb2[i] - rgb1[i]) for i in range(3)]\n break\n else: R, G, B = self.rgb_colors[-1][1] # just take the final color\n\n # Perceived brightness calculation from http://alienryderflex.com/hsp.html\n P = self._brightness(R, G, B)\n #print(\"DB: value = %f (%s), RGB = %f,%f,%f, P=%f (%s)\" % (value,z,R,G,B,P,\"black\" if 0.5 <= P else \"white\"))\n return \"black\" if 0.5 <= P else \"white\"\n\n def create_plotly_colorscale(self):\n \"\"\"\n Construct and return the plotly colorscale of this color map.\n\n Returns\n -------\n list\n A list of `[float_value, \"rgb(R,G,B)\"]` items.\n \"\"\"\n plotly_colorscale = [[z, 'rgb(%d,%d,%d)' %\n (round(r * 255), round(g * 255), round(b * 255))]\n for z, (r, g, b) in self.rgb_colors]\n return plotly_colorscale\n\n def interpolate_color(self, value):\n \"\"\"\n Retrieves the color at a particular colormap value.\n\n This function linearly interpolates between the colors of a\n this colormap's color scale\n\n Parameters\n ----------\n value : float\n The value (before normalization) to compute the color for.\n\n Returns\n -------\n str\n A string representation of the plotly color of the form `\"rgb(R,G,B)\"`.\n \"\"\"\n normalized_value = self.normalize(value)\n\n for i, (val, color) in enumerate(self.rgb_colors[:-1]):\n next_val, next_color = self.rgb_colors[i + 1]\n if val <= normalized_value < next_val:\n rgb = _np.array(color) # r,g,b values as array\n next_rgb = _np.array(next_color)\n v = (normalized_value - val) / (next_val - val)\n interp_rgb = (1.0 - v) * rgb + v * next_rgb\n break\n else:\n last_color_val, color = self.rgb_colors[-1]\n if last_color_val <= normalized_value: # just use final color value\n interp_rgb = _np.array(color)\n elif self.invalid_color:\n interp_rgb = 
_np.array(self.invalid_color)\n else:\n raise ValueError((\"Normalized value %g should be >= final \"\n \"color value (%g) or an invalid color should\"\n \" be set\") % (normalized_value, val))\n return 'rgb(%d,%d,%d)' % (int(round(interp_rgb[0] * 255)),\n int(round(interp_rgb[1] * 255)),\n int(round(interp_rgb[2] * 255)))\n\n def create_matplotlib_norm_and_cmap(self):\n \"\"\"\n Creates and returns normalization and colormap classes for matplotlib heatmap plots.\n\n Returns\n -------\n norm, cmap\n \"\"\"\n from .mpl_colormaps import mpl_make_linear_norm as _mpl_make_linear_norm\n from .mpl_colormaps import mpl_make_linear_cmap as _mpl_make_linear_cmap\n norm = _mpl_make_linear_norm(self.hmin, self.hmax)\n cmap = _mpl_make_linear_cmap(self.rgb_colors)\n return norm, cmap\n\n\nclass LinlogColormap(Colormap):\n \"\"\"\n Colormap which combines a linear grayscale portion with a logarithmic color (by default red) portion.\n\n The transition between these occurs at a point based on a percentile of chi^2 distribution.\n\n Parameters\n ----------\n vmin : float\n The minium value of the data being colormapped.\n\n vmax : float\n The maximum value of the data being colormapped.\n\n num_boxes : int\n The number of boxes in the plot this colormap is being used with,\n so that `pcntle` gives a percentage of the *worst* box being \"red\".\n\n pcntle : float\n A number between 0 and 1 giving the probability that the worst box\n in the plot will be red. Typically a value of 0.05 is used.\n\n dof_per_box : int\n The number of degrees of freedom represented by each box, so the\n expected distribution of each box's values is chi^2_[dof_per_box].\n\n color : {\"red\",\"blue\",\"green\",\"cyan\",\"yellow\",\"purple\"}\n The color to use for the non-grayscale part of the color scale.\n \"\"\"\n\n def __init__(self, vmin, vmax, num_boxes, pcntle, dof_per_box, color=\"red\"):\n \"\"\"\n Create a new LinlogColormap.\n\n Parameters\n ----------\n vmin, vmax : float\n The min and max values of the data being colormapped.\n\n num_boxes : int\n The number of boxes in the plot this colormap is being used with,\n so that `pcntle` gives a percentage of the *worst* box being \"red\".\n\n pcntle : float\n A number between 0 and 1 giving the probability that the worst box\n in the plot will be red. 
Typically a value of 0.05 is used.\n\n dof_per_box : int\n The number of degrees of freedom represented by each box, so the\n expected distribution of each box's values is chi^2_[dof_per_box].\n\n color : {\"red\",\"blue\",\"green\",\"cyan\",\"yellow\",\"purple\"}\n The color to use for the non-grayscale part of the color scale.\n \"\"\"\n self.N = num_boxes\n self.percentile = pcntle\n self.dof = dof_per_box\n hmin = 0 # we'll normalize all values to [0,1] and then\n hmax = 1 # plot.ly will map this range linearly to (also) [0,1]\n # range of our (and every) colorscale.\n\n #Notes on statistics below:\n # consider random variable Y = max(X_i) and CDF of X_i's is F(x)\n # then CDF of Y is given by: P( Y <= y ) = P( max(X_i) <= y )\n # which is the probability that *all* X_i's are <= y, which equals\n # product( P(X_i <= y) ) = prod( F(y) ), so if i=1...n then\n # CDF of Y is F(y)^n.\n # Below, we need the inverse of the CDF:\n # x such that CDF(x) = given_percentage, so\n # x such that F(x)^n = percentage, so\n # x such that F(x) = percentage^{1/n}\n # Our percentage = \"1-percentile\" and b/c (1-x)^{1/n} ~= 1 - x/n\n # we take the ppf at 1-percentile/N\n\n N = max(self.N, 1) # don't divide by N == 0 (if there are no boxes)\n self.trans = _np.ceil(_chi2.ppf(1 - self.percentile / N, self.dof))\n # the linear-log transition point\n\n self.vmin = vmin\n self.vmax = max(vmax, self.trans) # so linear portion color scale ends at trans\n\n # Colors ranging from white to gray on [0.0, 0.5) and pink to red on\n # [0.5, 1.0] such that the perceived brightness of the pink matches the\n # gray.\n gray = (0.4, 0.4, 0.4)\n if color == \"red\":\n c = (0.77, 0.143, 0.146); mx = (1.0, 0, 0)\n elif color == \"blue\":\n c = (0, 0, 0.7); mx = (0, 0, 1.0)\n elif color == \"green\":\n c = (0.0, 0.483, 0.0); mx = (0, 1.0, 0)\n elif color == \"cyan\":\n c = (0.0, 0.46, 0.46); mx = (0.0, 1.0, 1.0)\n elif color == \"yellow\":\n c = (0.415, 0.415, 0.0); mx = (1.0, 1.0, 0)\n elif color == \"purple\":\n c = (0.72, 0.0, 0.72); mx = (1.0, 0, 1.0)\n else:\n raise ValueError(\"Unknown color: %s\" % color)\n\n invalid_color = (0.8, 0.8, 1.0) # a light blue?\n\n super(LinlogColormap, self).__init__(\n [[0.0, (1., 1., 1.)], [0.499999999, gray],\n [0.5, c], [1.0, mx]], hmin, hmax, invalid_color)\n\n @classmethod\n def set_manual_transition_point(cls, vmin, vmax, trans, color=\"red\"):\n \"\"\"\n Create a new LinlogColormap with a manually-specified transition point.\n\n Parameters\n ----------\n vmin : float\n The minium value of the data being colormapped.\n\n vmax : float\n The maximum value of the data being colormapped.\n\n trans : float\n The transition-point value between the linear grayscale and\n logarithmic colorscale.\n\n color : {\"red\",\"blue\",\"green\",\"cyan\",\"yellow\",\"purple\"}\n the color to use for the non-grayscale part of the color scale.\n\n Returns\n -------\n LinlogColormap\n \"\"\"\n num_boxes = 1; pcntle = 0.5; dof_per_box = 1\n cmap = cls(vmin, vmax, num_boxes, pcntle, dof_per_box, color)\n cmap.trans = trans # override __init__'s value\n cmap.vmax = max(cmap.vmax, trans) # repeat of line in __init__ that depends on trans\n return cmap\n\n @smart_cached\n def normalize(self, value):\n \"\"\"\n Scale value to a value between self.hmin and self.hmax (heatmap endpoints).\n\n Parameters\n ----------\n value : float or numpy.ndarray\n The value to normalize.\n\n Returns\n -------\n float or numpy.ndarray\n \"\"\"\n #Safety stuff -- needed anymore? 
TODO\n if isinstance(value, _np.ma.MaskedArray) and value.count() == 0:\n # no unmasked elements, in which case a matplotlib bug causes the\n # __call__ below to fail (numpy.bool_ has no attribute '_mask')\n return_value = _np.zeros(value.shape)\n return_value.flat[:] = _np.nan # fill with NaNs\n # so just create a dummy return value with the correct size\n # that has all its entries masked (like value does)\n if return_value.shape == (): return return_value.item()\n else: return return_value\n\n #deal with numpy bug in handling masked nan values (nan still gives\n # \"invalid value\" warnings/errors even when masked)\n if _np.ma.is_masked(value):\n value = _np.ma.array(value.filled(1e100),\n mask=_np.ma.getmask(value))\n\n lin_norm_value = _vnorm(value, self.vmin, self.vmax)\n norm_trans = _vnorm(self.trans, self.vmin, self.vmax)\n log10_norm_trans = _np.ma.log10(norm_trans)\n with _np.errstate(divide='ignore'):\n # Ignore the division-by-zero error that occurs when 0 is passed to\n # log10 (the resulting NaN is filtered out by the where and is\n # harmless).\n\n #deal with numpy bug in handling masked nan values (nan still gives\n # \"invalid value\" warnings/errors even when masked)\n if _np.ma.is_masked(lin_norm_value):\n lin_norm_value = _np.ma.array(lin_norm_value.filled(1e100),\n mask=_np.ma.getmask(lin_norm_value))\n\n if norm_trans == 1.0:\n #then transition is at highest possible normalized value (1.0)\n # and the call to greater(...) below will always be True.\n # To avoid the False-branch getting div-by-zero errors, set:\n log10_norm_trans = 1.0 # because it's never used.\n\n off = 0.1 # offset to narrow the range of valid values to 0 (white) is never used for data\n in_0_to_1 = lin_norm_value / norm_trans # this is in range [0,1] where lin_norm_value <= norm_trans\n return_value = _np.ma.where(_np.ma.greater(norm_trans, lin_norm_value),\n # map = [0,1] -> [off/(1+off), 1] -> [off/(2*(1+off)), 0.5]\n (in_0_to_1 + off) / (1.0 + off) * 0.5,\n (log10_norm_trans\n - _np.ma.log10(lin_norm_value))\n / (2 * log10_norm_trans) + 0.5)\n return_value = return_value.filled(_np.nan) # replace masked values with NaNs for color mapping\n\n if return_value.shape == ():\n return return_value.item()\n else:\n return return_value\n\n def create_matplotlib_norm_and_cmap(self):\n \"\"\"\n Creates and returns normalization and colormap classes for matplotlib heatmap plots.\n\n Returns\n -------\n norm, cmap\n \"\"\"\n from .mpl_colormaps import MplLinLogNorm as _mpl_LinLogNorm\n _, cmap = super(LinlogColormap, self).create_matplotlib_norm_and_cmap()\n norm = _mpl_LinLogNorm(self)\n cmap.set_bad('w', 1)\n return norm, cmap\n\n\nclass DivergingColormap(Colormap):\n \"\"\"\n A diverging color map\n\n Parameters\n ----------\n vmin : float\n The minium value of the data being colormapped.\n\n vmax : float\n The maximum value of the data being colormapped.\n\n midpoint : float, optional\n The midpoint of the color scale.\n\n color : {\"RdBu\"}\n What colors to use.\n \"\"\"\n\n def __init__(self, vmin, vmax, midpoint=0.0, color=\"RdBu\"):\n \"\"\"\n Create a new DivergingColormap\n\n Parameters\n ----------\n vmin, vmax : float\n Min and max values of the data being colormapped.\n\n midpoint : float, optional\n The midpoint of the color scale.\n\n color : {\"RdBu\"}\n What colors to use.\n \"\"\"\n hmin = vmin\n hmax = vmax\n self.midpoint = midpoint\n assert(midpoint == 0.0), \"midpoint doesn't work yet!\"\n\n if color == \"RdBu\": # blue -> white -> red\n rgb_colors = [[0.0, (0.0, 0.0, 1.0)],\n [0.5, 
(1.0, 1.0, 1.0)],\n [1.0, (1.0, 0.0, 0.0)]]\n else:\n raise ValueError(\"Unknown color: %s\" % color)\n\n super(DivergingColormap, self).__init__(rgb_colors, hmin, hmax)\n\n #*Normalize* scratch\n #vmin, vmax, midpoint = self.vmin, self.vmax, self.midpoint\n #\n #is_scalar = False\n #if isinstance(value, float) or _compat.isint(value, int):\n # is_scalar = True\n #result = _np.ma.array(value)\n #\n #if not (vmin < midpoint < vmax):\n # raise ValueError(\"midpoint must be between maxvalue and minvalue.\")\n #elif vmin == vmax:\n # result.fill(0) # Or should it be all masked? Or 0.5?\n #elif vmin > vmax:\n # raise ValueError(\"maxvalue must be bigger than minvalue\")\n #else:\n # # ma division is very slow; we can take a shortcut\n # resdat = result.filled(0) #masked entries to 0 to avoid nans\n #\n # #First scale to -1 to 1 range, than to from 0 to 1.\n # resdat -= midpoint\n # resdat[resdat>0] /= abs(vmax - midpoint)\n # resdat[resdat<0] /= abs(vmin - midpoint)\n #\n # resdat /= 2.\n # resdat += 0.5\n # result = _np.ma.array(resdat, mask=result.mask, copy=False)\n #\n #if is_scalar:\n # result = float(result)\n #return result\n\n\nclass SequentialColormap(Colormap):\n \"\"\"\n A sequential color map\n\n Parameters\n ----------\n vmin : float\n The minium value of the data being colormapped.\n\n vmax : float\n The maximum value of the data being colormapped.\n\n color : {\"whiteToBlack\", \"blackToWhite\"}\n What colors to use.\n \"\"\"\n\n def __init__(self, vmin, vmax, color=\"whiteToBlack\"):\n \"\"\"\n Create a new SequentialColormap\n\n Parameters\n ----------\n vmin, vmax : float\n Min and max values of the data being colormapped.\n\n color : {\"whiteToBlack\", \"blackToWhite\"}\n What colors to use.\n \"\"\"\n hmin = vmin\n hmax = vmax\n\n if color == \"whiteToBlack\":\n rgb_colors = [[0, (1., 1., 1.)], [1.0, (0.0, 0.0, 0.0)]]\n elif color == \"blackToWhite\":\n rgb_colors = [[0, (0.0, 0.0, 0.0)], [1.0, (1., 1., 1.)]]\n elif color == \"whiteToBlue\":\n rgb_colors = [[0, (1., 1., 1.)], [1.0, (0., 0., 1.)]]\n elif color == \"whiteToRed\":\n rgb_colors = [[0, (1., 1., 1.)], [1.0, (1., 0., 0.)]]\n else:\n raise ValueError(\"Unknown color: %s\" % color)\n\n super(SequentialColormap, self).__init__(rgb_colors, hmin, hmax)\n\n #*Normalize* scratch\n #is_scalar = False\n #if isinstance(value, float) or _compat.isint(value, int):\n # is_scalar = True\n #\n #result = _np.ma.array(value)\n #\n #if self.vmin == self.vmax:\n # result.fill(0) # Or should it be all masked? Or 0.5?\n #elif self.vmin > self.vmax:\n # raise ValueError(\"maxvalue must be bigger than minvalue\")\n #else:\n # resdat = result.filled(0) #masked entries to 0 to avoid nans\n # resdat = _vnorm(resdat, self.vmin, self.vmax)\n # result = _np.ma.array(resdat, mask=result.mask, copy=False)\n #\n #if is_scalar:\n # result = result[0]\n #return result\n\n\nclass PiecewiseLinearColormap(Colormap):\n \"\"\"\n A piecewise-linear color map\n\n Parameters\n ----------\n rgb_colors : list\n A list of `[val, (R,G,B)]` elements where `val` is a floating point\n number (pre-normalization) of the value corresponding to the color\n given by `R`,`G`,and `B`: red, green, and blue floating point values\n in [0,1]. 
The color will be interpolated between the different \"point\"\n elements in this list.\n \"\"\"\n\n def __init__(self, rgb_colors):\n \"\"\"\n Create a new PiecewiseLinearColormap\n\n Parameters\n ----------\n rgb_colors : list\n A list of `[val, (R,G,B)]` elements where `val` is a floating point\n number (pre-normalization) of the value corresponding to the color\n given by `R`,`G`,and `B`: red, green, and blue floating point values\n in [0,1]. The color will be interpolated between the different \"point\"\n elements in this list.\n \"\"\"\n hmin = min([v for v, rgb in rgb_colors])\n hmax = max([v for v, rgb in rgb_colors])\n\n def norm(x): # normalize color \"point\" values to [0,1] interval\n return (x - hmin) / (hmax - hmin) if (hmax > hmin) else 0.0\n\n norm_rgb_colors = [[norm(val), rgb] for val, rgb in rgb_colors]\n super(PiecewiseLinearColormap, self).__init__(norm_rgb_colors, hmin, hmax)\n", "\"\"\"\nRoutines for converting python objects to python.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport collections as _collections\n\nfrom pygsti.report.reportableqty import ReportableQty as _ReportableQty\n\n'''\ntable() and cell() functions are used by table.py in table creation\neverything else is used in creating formatters in formatters.py\n'''\n\n\ndef table(custom_headings, col_headings_formatted, rows, spec):\n \"\"\"\n Create a \"Python table\" - really a pandas DataFrame\n\n Parameters\n ----------\n custom_headings : None, dict\n optional dictionary of custom table headings\n\n col_headings_formatted : list\n formatted column headings\n\n rows : list of lists of cell-strings\n Data in the table, pre-formatted\n\n spec : dict\n options for the formatter\n\n Returns\n -------\n dict : contains key 'python', which corresponds to a\n pandas.DataFrame object representing the table\n \"\"\"\n try:\n import pandas as _pd\n except ImportError:\n raise ValueError((\"You must have the optional 'pandas' package \"\n \"installed to render tables in the 'python' format\"))\n\n def getval(lbl):\n return lbl.value if isinstance(lbl, _ReportableQty) else lbl\n\n if custom_headings is not None \\\n and \"python\" in custom_headings:\n colLabels = custom_headings['python']\n else:\n colLabels = [getval(x) for x in col_headings_formatted]\n nCols = len(colLabels)\n\n if nCols == 0: return {'python': _pd.DataFrame()}\n\n #Remove duplicate in colLabels (otherwise these cols get merged weirdly below)\n for i in range(len(colLabels)):\n if colLabels[i] in colLabels[0:i]:\n k = 1\n while colLabels[i] + str(k) in colLabels[0:i]: k += 1\n colLabels[i] = colLabels[i] + str(k)\n\n #Add addition error-bar columns for any columns that have error bar info\n cols_containing_ebs = set()\n for formatted_rowData in rows:\n assert(len(formatted_rowData) == nCols)\n for i, formatted_cellData in enumerate(formatted_rowData):\n if isinstance(formatted_cellData, 
_ReportableQty) and \\\n formatted_cellData.has_errorbar:\n cols_containing_ebs.add(i)\n\n n = 0 # number of cols inserted\n for iCol in sorted(cols_containing_ebs):\n origLbl = colLabels[iCol + n]\n colLabels.insert(iCol + n + 1, origLbl + \" Error Bar\")\n n += 1\n\n rowLabels = []\n rowIndexName = getval(colLabels[0])\n if len(rowIndexName.strip()) == 0:\n rowIndexName = None\n\n dict_of_columns = _collections.OrderedDict()\n for colLabel in colLabels[1:]:\n dict_of_columns[colLabel] = []\n\n for formatted_rowData in rows:\n rowLabels.append(getval(formatted_rowData[0])); n = 0\n\n for i, formatted_cellData in enumerate(formatted_rowData[1:], start=1):\n if i in cols_containing_ebs:\n if isinstance(formatted_cellData, _ReportableQty):\n val, eb = formatted_cellData.value_and_errorbar\n else:\n val, eb = formatted_cellData, None\n dict_of_columns[colLabels[i + n]].append(val)\n dict_of_columns[colLabels[i + n + 1]].append(eb)\n n += 1\n else:\n dict_of_columns[colLabels[i + n]].append(getval(formatted_cellData))\n\n indx = _pd.Index(rowLabels, name=rowIndexName)\n #print(\"DB PANDAS: headings=\",colLabels) #DEBUG\n #print(\"col_dict(cnt) = \", [(k,len(v)) for k,v in dict_of_columns.items()]) #DEBUG\n df = _pd.DataFrame(dict_of_columns,\n columns=dict_of_columns.keys(),\n index=indx)\n\n return {'python': df}\n\n\ndef cell(data, label, spec):\n \"\"\"\n Format the cell of a python table\n\n Parameters\n ----------\n data : string\n string representation of cell content\n\n label : string\n optional cell label, used for tooltips\n\n spec : dict\n options for the formatters\n\n Returns\n -------\n string\n \"\"\"\n return data\n\n\ndef list(l, specs):\n \"\"\"\n Stub for conversion that isn't needed in python case.\n\n (Convert a python list to python.)\n\n Parameters\n ----------\n l : list\n list to convert into latex. sub-items pre formatted\n\n specs : dictionary\n Dictionary of user-specified and default parameters to formatting\n\n Returns\n -------\n list\n \"\"\"\n return l\n\n\ndef vector(v, specs):\n \"\"\"\n Stub for conversion that isn't needed in python case.\n\n (Convert a 1D numpy array to python.)\n\n Parameters\n ----------\n v : numpy array\n 1D array to convert.\n\n specs : dictionary\n Dictionary of user-specified and default parameters to formatting\n\n Returns\n -------\n numpy array\n \"\"\"\n return v\n\n\ndef matrix(m, specs):\n \"\"\"\n Stub for conversion that isn't needed in python case.\n\n Convert a 2D numpy array to python.\n\n Parameters\n ----------\n m : numpy array\n 2D array to convert.\n\n specs : dictionary\n Dictionary of user-specified and default parameters to formatting\n\n Returns\n -------\n numpy array\n \"\"\"\n return m\n\n\ndef value(el, specs):\n \"\"\"\n Stub for conversion that isn't needed in python case.\n\n (this function would be for converting python to python).\n\n Parameters\n ----------\n el : float or complex\n Value to convert into latex.\n\n specs : dictionary\n Dictionary of user-specified and default parameters to formatting\n\n Returns\n -------\n float or complex\n \"\"\"\n return el\n\n\ndef escaped(txt, specs):\n \"\"\"\n Stub for conversion that isn't needed in python case.\n\n (Escape txt so it is python safe.)\n\n Parameters\n ----------\n txt : string\n value to escape\n\n specs : dictionary\n Dictionary of user-specified and default parameters to formatting\n\n Returns\n -------\n string\n \"\"\"\n return txt\n" ]
[ [ "numpy.dot", "numpy.expand_dims", "numpy.kron", "numpy.max", "numpy.argmin", "numpy.any", "numpy.where", "numpy.trace", "numpy.conjugate", "numpy.linalg.eig", "numpy.tensordot", "numpy.zeros", "numpy.linalg.inv", "numpy.array", "numpy.sum", "numpy.linalg.norm", "numpy.linalg.eigvalsh", "numpy.average", "numpy.empty" ], [ "matplotlib.pyplot.title", "numpy.isnan", "matplotlib.pyplot.subplots", "matplotlib.colors.ListedColormap", "matplotlib.cm.get_cmap", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ], [ "numpy.array", "numpy.identity" ], [ "numpy.may_share_memory" ], [ "scipy.stats.chi2.ppf", "numpy.sqrt", "numpy.clip", "numpy.ma.log10", "numpy.ma.getmask", "numpy.ma.greater", "numpy.errstate", "numpy.array", "numpy.ma.zeros", "numpy.ma.is_masked", "numpy.zeros", "numpy.isclose" ], [ "pandas.Index", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ClarePan/Tax-Calculator
[ "d2d6cb4b551f34017db7166d91d982b5c4670816", "d2d6cb4b551f34017db7166d91d982b5c4670816" ]
[ "taxcalc/tests/test_calculate.py", "taxcalc/calculate.py" ]
[ "# CODING-STYLE CHECKS:\n# pycodestyle test_calculate.py\n\nimport os\nimport json\nfrom io import StringIO\nimport tempfile\nimport copy\nimport six\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom taxcalc import Policy, Records, Calculator, Behavior, Consumption\n\n\nRAWINPUTFILE_FUNITS = 4\nRAWINPUTFILE_YEAR = 2015\nRAWINPUTFILE_CONTENTS = (\n 'RECID,MARS\\n'\n '1,2\\n'\n '2,1\\n'\n '3,4\\n'\n '4,3\\n'\n)\n\n\[email protected](scope='module', name='rawinputfile')\ndef fixture_rawinputfile():\n \"\"\"\n Temporary input file that contains the minimum required input varaibles.\n \"\"\"\n ifile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n ifile.write(RAWINPUTFILE_CONTENTS)\n ifile.close()\n # must close and then yield for Windows platform\n yield ifile\n if os.path.isfile(ifile.name):\n try:\n os.remove(ifile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\[email protected](scope='module', name='policyfile')\ndef fixture_policyfile():\n txt = \"\"\"{\"_almdep\": {\"value\": [7150, 7250, 7400]},\n \"_almsep\": {\"value\": [40400, 41050]},\n \"_rt5\": {\"value\": [0.33 ]},\n \"_rt7\": {\"value\": [0.396]}}\"\"\"\n f = tempfile.NamedTemporaryFile(mode=\"a\", delete=False)\n f.write(txt + \"\\n\")\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_make_calculator(cps_subsample):\n syr = 2014\n pol = Policy(start_year=syr, num_years=9)\n assert pol.current_year == syr\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n consump = Consumption()\n consump.update_consumption({syr: {'_MPC_e20400': [0.05]}})\n assert consump.current_year == Consumption.JSON_START_YEAR\n calc = Calculator(policy=pol, records=rec,\n consumption=consump, behavior=Behavior())\n assert calc.current_year == syr\n assert calc.records_current_year() == syr\n # test incorrect Calculator instantiation:\n with pytest.raises(ValueError):\n Calculator(policy=None, records=rec)\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=None)\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=rec, behavior=list())\n with pytest.raises(ValueError):\n Calculator(policy=pol, records=rec, consumption=list())\n\n\ndef test_make_calculator_deepcopy(cps_subsample):\n pol = Policy()\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc1 = Calculator(policy=pol, records=rec)\n calc2 = copy.deepcopy(calc1)\n assert isinstance(calc2, Calculator)\n\n\ndef test_make_calculator_with_policy_reform(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n year = rec.current_year\n # create a Policy object and apply a policy reform\n pol = Policy()\n reform = {2013: {'_II_em': [4000], '_II_em_cpi': False,\n '_STD_Aged': [[1600, 1300, 1300, 1600, 1600]],\n '_STD_Aged_cpi': False}}\n pol.implement_reform(reform)\n # create a Calculator object using this policy reform\n calc = Calculator(policy=pol, records=rec)\n # check that Policy object embedded in Calculator object is correct\n assert calc.current_year == year\n assert calc.policy_param('II_em') == 4000\n assert np.allclose(calc.policy_param('_II_em'),\n np.array([4000] * Policy.DEFAULT_NUM_YEARS))\n exp_STD_Aged = [[1600, 1300, 1300,\n 1600, 1600]] * Policy.DEFAULT_NUM_YEARS\n assert np.allclose(calc.policy_param('_STD_Aged'),\n np.array(exp_STD_Aged))\n assert np.allclose(calc.policy_param('STD_Aged'),\n np.array([1600, 1300, 1300, 1600, 1600]))\n\n\ndef 
test_make_calculator_with_multiyear_reform(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n year = rec.current_year\n # create a Policy object and apply a policy reform\n pol = Policy()\n reform = {2015: {}, 2016: {}}\n reform[2015]['_II_em'] = [5000, 6000] # reform values for 2015 and 2016\n reform[2015]['_II_em_cpi'] = False\n reform[2016]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600]]\n pol.implement_reform(reform)\n # create a Calculator object using this policy-reform\n calc = Calculator(policy=pol, records=rec)\n # check that Policy object embedded in Calculator object is correct\n assert pol.num_years == Policy.DEFAULT_NUM_YEARS\n assert calc.current_year == year\n assert calc.policy_param('II_em') == 3950\n exp_II_em = [3900, 3950, 5000] + [6000] * (Policy.DEFAULT_NUM_YEARS - 3)\n assert np.allclose(calc.policy_param('_II_em'),\n np.array(exp_II_em))\n calc.increment_year()\n calc.increment_year()\n assert calc.current_year == 2016\n assert np.allclose(calc.policy_param('STD_Aged'),\n np.array([1600, 1300, 1600, 1300, 1600]))\n\n\ndef test_calculator_advance_to_year(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n pol = Policy()\n calc = Calculator(policy=pol, records=rec)\n calc.advance_to_year(2016)\n assert calc.current_year == 2016\n with pytest.raises(ValueError):\n calc.advance_to_year(2015)\n\n\ndef test_make_calculator_raises_on_no_policy(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n with pytest.raises(ValueError):\n Calculator(records=rec)\n\n\ndef test_calculator_mtr(cps_subsample):\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calcx = Calculator(policy=Policy(), records=rec)\n calcx.calc_all()\n combinedx = calcx.array('combined')\n c00100x = calcx.array('c00100')\n calc = Calculator(policy=Policy(), records=rec)\n recs_pre_e00200p = copy.deepcopy(calc.array('e00200p'))\n (mtr_ptx, mtr_itx, mtr_cmb) = calc.mtr(variable_str='e00200p',\n zero_out_calculated_vars=True)\n recs_post_e00200p = calc.array('e00200p')\n assert np.allclose(recs_post_e00200p, recs_pre_e00200p)\n assert np.allclose(calc.array('combined'), combinedx)\n assert np.allclose(calc.array('c00100'), c00100x)\n assert np.array_equal(mtr_cmb, mtr_ptx) is False\n assert np.array_equal(mtr_ptx, mtr_itx) is False\n with pytest.raises(ValueError):\n calc.mtr(variable_str='bad_income_type')\n (_, _, mtr_combined) = calc.mtr(variable_str='e00200s',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00650',\n negative_finite_diff=True,\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00900p',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e01700',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e26270',\n calc_all_already_called=True)\n assert isinstance(mtr_combined, np.ndarray)\n (_, _, mtr_combined) = calc.mtr(variable_str='e00200p',\n calc_all_already_called=True)\n assert np.allclose(mtr_combined, mtr_cmb)\n assert np.allclose(calc.array('combined'), combinedx)\n assert np.allclose(calc.array('c00100'), c00100x)\n\n\ndef test_calculator_mtr_when_PT_rates_differ():\n reform = {2013: {'_II_rt1': [0.40],\n '_II_rt2': [0.40],\n '_II_rt3': [0.40],\n '_II_rt4': 
[0.40],\n '_II_rt5': [0.40],\n '_II_rt6': [0.40],\n '_II_rt7': [0.40],\n '_PT_rt1': [0.30],\n '_PT_rt2': [0.30],\n '_PT_rt3': [0.30],\n '_PT_rt4': [0.30],\n '_PT_rt5': [0.30],\n '_PT_rt6': [0.30],\n '_PT_rt7': [0.30]}}\n funit = (\n u'RECID,MARS,FLPDYR,e00200,e00200p,e00900,e00900p,extraneous\\n'\n u'1, 1, 2009, 200000,200000, 100000,100000, 9999999999\\n'\n )\n rec = Records(pd.read_csv(StringIO(funit)))\n pol = Policy()\n calc1 = Calculator(policy=pol, records=rec)\n (_, mtr1, _) = calc1.mtr(variable_str='p23250')\n pol.implement_reform(reform)\n calc2 = Calculator(policy=pol, records=rec)\n (_, mtr2, _) = calc2.mtr(variable_str='p23250')\n assert np.allclose(mtr1, mtr2, rtol=0.0, atol=1e-06)\n\n\ndef test_make_calculator_increment_years_first(cps_subsample):\n # create Policy object with policy reform\n syr = 2013\n pol = Policy(start_year=syr)\n reform = {2015: {}, 2016: {}}\n std5 = 2000\n reform[2015]['_STD_Aged'] = [[std5, std5, std5, std5, std5]]\n reform[2015]['_II_em'] = [5000]\n reform[2016]['_II_em'] = [6000]\n reform[2016]['_II_em_cpi'] = False\n pol.implement_reform(reform)\n # create Calculator object with Policy object as modified by reform\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=pol, records=rec)\n # compare expected policy parameter values with those embedded in calc\n irates = pol.inflation_rates()\n irate2015 = irates[2015 - syr]\n irate2016 = irates[2016 - syr]\n std6 = std5 * (1.0 + irate2015)\n std7 = std6 * (1.0 + irate2016)\n exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500],\n [1550, 1200, 1200, 1550, 1550],\n [std5, std5, std5, std5, std5],\n [std6, std6, std6, std6, std6],\n [std7, std7, std7, std7, std7]])\n act_STD_Aged = calc.policy_param('_STD_Aged')\n assert np.allclose(act_STD_Aged[:5], exp_STD_Aged)\n exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])\n act_II_em = calc.policy_param('_II_em')\n assert np.allclose(act_II_em[:5], exp_II_em)\n\n\ndef test_ID_HC_vs_BS(cps_subsample):\n \"\"\"\n Test that complete haircut of itemized deductions produces same\n results as a 100% benefit surtax with no benefit deduction.\n \"\"\"\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify complete-haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_Medical_hc': [1.0],\n '_ID_StateLocalTax_hc': [1.0],\n '_ID_RealEstate_hc': [1.0],\n '_ID_Casualty_hc': [1.0],\n '_ID_Miscellaneous_hc': [1.0],\n '_ID_InterestPaid_hc': [1.0],\n '_ID_Charity_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=recs)\n hc_calc.calc_all()\n hc_taxes = hc_calc.dataframe(['iitax', 'payrolltax'])\n del hc_calc\n # specify benefit-surtax reform policy and Calculator object\n bs_reform = {2013: {'_ID_BenefitSurtax_crt': [0.0],\n '_ID_BenefitSurtax_trt': [1.0]}}\n bs_policy = Policy()\n bs_policy.implement_reform(bs_reform)\n bs_calc = Calculator(policy=bs_policy, records=recs)\n bs_calc.calc_all()\n bs_taxes = bs_calc.dataframe(['iitax', 'payrolltax'])\n del bs_calc\n # compare calculated taxes generated by the two reforms\n assert np.allclose(hc_taxes['payrolltax'], bs_taxes['payrolltax'])\n assert np.allclose(hc_taxes['iitax'], bs_taxes['iitax'])\n\n\ndef test_ID_StateLocal_HC_vs_CRT(cps_subsample):\n \"\"\"\n Test that a cap on state/local income and sales tax deductions at 0 percent\n of AGI is equivalent to a complete haircut on the same state/local tax\n deductions.\n \"\"\"\n rec = 
Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify state/local complete haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_StateLocalTax_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=rec)\n hc_calc.calc_all()\n # specify AGI cap reform policy and Calculator object\n crt_reform = {2013: {'_ID_StateLocalTax_crt': [0.0]}}\n crt_policy = Policy()\n crt_policy.implement_reform(crt_reform)\n crt_calc = Calculator(policy=crt_policy, records=rec)\n crt_calc.calc_all()\n # compare calculated tax results generated by the two reforms\n assert np.allclose(hc_calc.array('payrolltax'),\n crt_calc.array('payrolltax'))\n assert np.allclose(hc_calc.array('iitax'),\n crt_calc.array('iitax'))\n\n\ndef test_ID_RealEstate_HC_vs_CRT(cps_subsample):\n \"\"\"\n Test that a cap on all state, local, and foreign real estate tax deductions\n at 0 percent of AGI is equivalent to a complete haircut on the same real\n estate tax deductions.\n \"\"\"\n rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n # specify real estate complete haircut reform policy and Calculator object\n hc_reform = {2013: {'_ID_RealEstate_hc': [1.0]}}\n hc_policy = Policy()\n hc_policy.implement_reform(hc_reform)\n hc_calc = Calculator(policy=hc_policy, records=rec)\n hc_calc.calc_all()\n # specify AGI cap reform policy and Calculator object\n crt_reform = {2013: {'_ID_RealEstate_crt': [0.0]}}\n crt_policy = Policy()\n crt_policy.implement_reform(crt_reform)\n crt_calc = Calculator(policy=crt_policy, records=rec)\n crt_calc.calc_all()\n # compare calculated tax results generated by the two reforms\n assert np.allclose(hc_calc.array('payrolltax'),\n crt_calc.array('payrolltax'))\n assert np.allclose(hc_calc.array('iitax'),\n crt_calc.array('iitax'))\n\n\ndef test_calculator_using_nonstd_input(rawinputfile):\n # check Calculator handling of raw, non-standard input data with no aging\n pol = Policy()\n pol.set_year(RAWINPUTFILE_YEAR) # set policy params to input data year\n nonstd = Records(data=rawinputfile.name,\n gfactors=None, # keeps raw data unchanged\n weights=None,\n start_year=RAWINPUTFILE_YEAR) # set raw input data year\n assert nonstd.array_length == RAWINPUTFILE_FUNITS\n calc = Calculator(policy=pol, records=nonstd,\n sync_years=False) # keeps raw data unchanged\n assert calc.current_year == RAWINPUTFILE_YEAR\n calc.calc_all()\n assert calc.weighted_total('e00200') == 0\n assert calc.total_weight() == 0\n varlist = ['RECID', 'MARS']\n pdf = calc.dataframe(varlist)\n assert isinstance(pdf, pd.DataFrame)\n assert pdf.shape == (RAWINPUTFILE_FUNITS, len(varlist))\n mars = calc.array('MARS')\n assert isinstance(mars, np.ndarray)\n assert mars.shape == (RAWINPUTFILE_FUNITS,)\n exp_iitax = np.zeros((nonstd.array_length,))\n assert np.allclose(calc.array('iitax'), exp_iitax)\n mtr_ptax, _, _ = calc.mtr(wrt_full_compensation=False)\n exp_mtr_ptax = np.zeros((nonstd.array_length,))\n exp_mtr_ptax.fill(0.153)\n assert np.allclose(mtr_ptax, exp_mtr_ptax)\n\n\nREFORM_CONTENTS = \"\"\"\n// Example of a reform file suitable for read_json_param_objects().\n// This JSON file can contain any number of trailing //-style comments, which\n// will be removed before the contents are converted from JSON to a dictionary.\n// Within each \"policy\" object, the primary keys are parameters and\n// the secondary keys are years.\n// Both the primary and secondary key values must be enclosed in quotes (\").\n// Boolean variables 
are specified as true or false (no quotes; all lowercase).\n// Parameter code in the policy object is enclosed inside a pair of double\n// pipe characters (||).\n{\n \"policy\": {\n \"_AMT_brk1\": // top of first AMT tax bracket\n {\"2015\": [200000],\n \"2017\": [300000]\n },\n \"_EITC_c\": // maximum EITC amount by number of qualifying kids (0,1,2,3+)\n {\"2016\": [[ 900, 5000, 8000, 9000]],\n \"2019\": [[1200, 7000, 10000, 12000]]\n },\n \"_II_em\": // personal exemption amount (see indexing changes below)\n {\"2016\": [6000],\n \"2018\": [7500],\n \"2020\": [9000]\n },\n \"_II_em_cpi\": // personal exemption amount indexing status\n {\"2016\": false, // values in future years are same as this year value\n \"2018\": true // values in future years indexed with this year as base\n },\n \"_SS_Earnings_c\": // social security (OASDI) maximum taxable earnings\n {\"2016\": [300000],\n \"2018\": [500000],\n \"2020\": [700000]\n },\n \"_AMT_em_cpi\": // AMT exemption amount indexing status\n {\"2017\": false, // values in future years are same as this year value\n \"2020\": true // values in future years indexed with this year as base\n }\n }\n}\n\"\"\"\n\n\[email protected](scope='module', name='reform_file')\ndef fixture_reform_file():\n \"\"\"\n Temporary reform file for read_json_param_objects() function.\n \"\"\"\n rfile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n rfile.write(REFORM_CONTENTS)\n rfile.close()\n # must close and then yield for Windows platform\n yield rfile\n if os.path.isfile(rfile.name):\n try:\n os.remove(rfile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\nASSUMP_CONTENTS = \"\"\"\n// Example of assump file suitable for the read_json_param_objects().\n// This JSON file can contain any number of trailing //-style comments, which\n// will be removed before the contents are converted from JSON to a dictionary.\n// Within each \"behavior\", \"consumption\" and \"growth\" object, the\n// primary keys are parameters and the secondary keys are years.\n// Both the primary and secondary key values must be enclosed in quotes (\").\n// Boolean variables are specified as true or false (no quotes; all lowercase).\n{\n \"consumption\": { \"_MPC_e18400\": {\"2018\": [0.05]} },\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n}\n\"\"\"\n\n\[email protected](scope='module', name='assump_file')\ndef fixture_assump_file():\n \"\"\"\n Temporary assumption file for read_json_params_files() function.\n \"\"\"\n afile = tempfile.NamedTemporaryFile(mode='a', delete=False)\n afile.write(ASSUMP_CONTENTS)\n afile.close()\n # must close and then yield for Windows platform\n yield afile\n if os.path.isfile(afile.name):\n try:\n os.remove(afile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file\n\n\[email protected](\"set_year\", [False, True])\ndef test_read_json_reform_file_and_implement_reform(reform_file,\n assump_file,\n set_year):\n \"\"\"\n Test reading and translation of reform file into a reform dictionary\n that is then used to call implement_reform method and Calculate.calc_all()\n NOTE: implement_reform called when policy.current_year == policy.start_year\n \"\"\"\n pol = Policy()\n if set_year:\n pol.set_year(2015)\n param_dict = Calculator.read_json_param_objects(reform_file.name,\n assump_file.name)\n pol.implement_reform(param_dict['policy'])\n syr = pol.start_year\n amt_brk1 = pol._AMT_brk1\n assert amt_brk1[2015 - syr] == 200000\n assert 
amt_brk1[2016 - syr] > 200000\n assert amt_brk1[2017 - syr] == 300000\n assert amt_brk1[2018 - syr] > 300000\n ii_em = pol._II_em\n assert ii_em[2016 - syr] == 6000\n assert ii_em[2017 - syr] == 6000\n assert ii_em[2018 - syr] == 7500\n assert ii_em[2019 - syr] > 7500\n assert ii_em[2020 - syr] == 9000\n assert ii_em[2021 - syr] > 9000\n amt_em = pol._AMT_em\n assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]\n assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]\n assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]\n assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]\n assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]\n add4aged = pol._ID_Medical_frt_add4aged\n assert add4aged[2015 - syr] == -0.025\n assert add4aged[2016 - syr] == -0.025\n assert add4aged[2017 - syr] == 0.0\n assert add4aged[2022 - syr] == 0.0\n\n\[email protected](scope='module', name='bad1reformfile')\ndef fixture_bad1reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"policy\": { // example of incorrect JSON because 'x' must be \"x\"\n 'x': {\"2014\": [4000]}\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad2reformfile')\ndef fixture_bad2reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"title\": \"\",\n \"policyx\": { // example of reform file not containing \"policy\" key\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad3reformfile')\ndef fixture_bad3reformfile():\n # specify JSON text for reform\n txt = \"\"\"\n {\n \"title\": \"\",\n \"policy\": {\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n },\n \"behavior\": { // example of misplaced \"behavior\" key\n }\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_read_bad_json_reform_file(bad1reformfile, bad2reformfile,\n bad3reformfile):\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad1reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad2reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(bad3reformfile.name, None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(list(), None)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, 'unknown_file_name')\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, list())\n\n\[email protected](scope='module', name='bad1assumpfile')\ndef fixture_bad1assumpfile():\n # specify JSON text for assumptions\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": { // example of incorrect JSON because 'x' must be \"x\"\n 'x': {\"2014\": [0.25]}\n },\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', 
name='bad2assumpfile')\ndef fixture_bad2assumpfile():\n # specify JSON text for assumptions\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behaviorx\": {}, // example of assump file not containing \"behavior\" key\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\[email protected](scope='module', name='bad3assumpfile')\ndef fixture_bad3assumpfile():\n # specify JSON text for assump\n txt = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"policy\": { // example of misplaced policy key\n \"_SS_Earnings_c\": {\"2018\": [9e99]}\n },\n \"growmodel\": {}\n }\n \"\"\"\n f = tempfile.NamedTemporaryFile(mode='a', delete=False)\n f.write(txt + '\\n')\n f.close()\n # Must close and then yield for Windows platform\n yield f\n os.remove(f.name)\n\n\ndef test_read_bad_json_assump_file(bad1assumpfile, bad2assumpfile,\n bad3assumpfile):\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad1assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad2assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, bad3assumpfile.name)\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, 'unknown_file_name')\n with pytest.raises(ValueError):\n Calculator.read_json_param_objects(None, list())\n\n\ndef test_convert_parameter_dict():\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({2013: {'2013': [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({'_II_em': {2013: [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({4567: {2013: [40000]}})\n with pytest.raises(ValueError):\n Calculator._convert_parameter_dict({'_II_em': 40000})\n rdict = Calculator._convert_parameter_dict({'_II_em': {'2013': [40000]}})\n assert isinstance(rdict, dict)\n\n\ndef test_calc_all(reform_file, rawinputfile):\n cyr = 2016\n pol = Policy()\n param_dict = Calculator.read_json_param_objects(reform_file.name, None)\n pol.implement_reform(param_dict['policy'])\n pol.set_year(cyr)\n nonstd = Records(data=rawinputfile.name, gfactors=None,\n weights=None, start_year=cyr)\n assert nonstd.array_length == RAWINPUTFILE_FUNITS\n calc = Calculator(policy=pol, records=nonstd,\n sync_years=False) # keeps raw data unchanged\n assert calc.current_year == cyr\n assert calc.reform_warnings == ''\n\n\ndef test_translate_json_reform_suffixes_mars_non_indexed():\n # test read_json_param_objects()\n # using MARS-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_AMEDT_ec_joint\": {\"2018\": [400000], \"2016\": [300000]},\n \"_AMEDT_ec_separate\": {\"2017\": [150000], \"2019\": [200000]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_AMEDT_ec\": {\"2016\": [[200000, 300000, 125000, 200000, 200000]],\n \"2017\": [[200000, 300000, 150000, 200000, 200000]],\n \"2018\": [[200000, 400000, 150000, 200000, 200000]],\n \"2019\": [[200000, 400000, 200000, 200000, 200000]]},\n \"_II_em\": {\"2015\": [15000], \"2020\": [20000]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = 
pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_AMEDT_ec' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_AMEDT_ec'],\n rdict2[year]['_AMEDT_ec'],\n atol=0.01, rtol=0.0)\n\n\ndef test_translate_json_reform_suffixes_eic():\n # test read_json_param_objects(...)\n # using EIC-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_EITC_c_0kids\": {\"2018\": [510], \"2019\": [510]},\n \"_EITC_c_1kid\": {\"2019\": [3400], \"2018\": [3400]},\n \"_EITC_c_2kids\": {\"2018\": [5616], \"2019\": [5616]},\n \"_EITC_c_3+kids\": {\"2019\": [6318], \"2018\": [6318]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_EITC_c\": {\"2019\": [[510, 3400, 5616, 6318]],\n \"2018\": [[510, 3400, 5616, 6318]]},\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_EITC_c' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_EITC_c'],\n rdict2[year]['_EITC_c'],\n atol=0.01, rtol=0.0)\n\n\ndef test_translate_json_reform_suffixes_idedtype():\n # test read_json_param_objects(...)\n # using idedtype-indexed parameter suffixes\n json1 = \"\"\"{\"policy\": {\n \"_ID_BenefitCap_rt\": {\"2019\": [0.2]},\n \"_ID_BenefitCap_Switch_medical\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_casualty\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_misc\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_interest\": {\"2019\": [false]},\n \"_ID_BenefitCap_Switch_charity\": {\"2019\": [false]},\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]}\n }}\"\"\"\n pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)\n rdict1 = pdict1['policy']\n json2 = \"\"\"{\"policy\": {\n \"_II_em\": {\"2020\": [20000], \"2015\": [15000]},\n \"_ID_BenefitCap_Switch\": {\n \"2019\": [[false, true, true, false, false, false, false]]\n },\n \"_ID_BenefitCap_rt\": {\"2019\": [0.2]}\n }}\"\"\"\n pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)\n rdict2 = pdict2['policy']\n assert len(rdict2) == len(rdict1)\n for year in rdict2.keys():\n if '_II_em' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_II_em'],\n rdict2[year]['_II_em'],\n atol=0.01, rtol=0.0)\n if '_ID_BenefitCap_rt' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_ID_BenefitCap_rt'],\n rdict2[year]['_ID_BenefitCap_rt'],\n atol=0.01, rtol=0.0)\n if '_ID_BenefitCap_Switch' in rdict2[year].keys():\n assert np.allclose(rdict1[year]['_ID_BenefitCap_Switch'],\n rdict2[year]['_ID_BenefitCap_Switch'],\n atol=0.01, rtol=0.0)\n\n\ndef test_read_json_param_with_suffixes_and_errors():\n # test interaction of policy parameter suffixes and reform errors\n # (fails without 0.10.2 bug fix as reported by Hank Doupe in PB PR#641)\n reform = {\n u'policy': {\n u'_II_brk4_separate': {u'2017': [5000.0]},\n u'_STD_separate': {u'2017': [8000.0]},\n u'_STD_single': {u'2018': [1000.0]},\n u'_II_brk2_headhousehold': {u'2017': [1000.0]},\n u'_II_brk4_single': {u'2017': [500.0]},\n u'_STD_joint': 
{u'2017': [10000.0], u'2020': [150.0]},\n u'_II_brk2_separate': {u'2017': [1000.0]},\n u'_II_brk2_single': {u'2017': [1000.0]},\n u'_II_brk2_joint': {u'2017': [1000.0]},\n u'_FICA_ss_trt': {u'2017': [-1.0], u'2019': [0.1]},\n u'_II_brk4_headhousehold': {u'2017': [500.0]},\n u'_STD_headhousehold': {u'2017': [10000.0], u'2020': [150.0]},\n u'_II_brk4_joint': {u'2017': [500.0]},\n u'_ID_BenefitSurtax_Switch_medical': {u'2017': [True]}\n }\n }\n json_reform = json.dumps(reform)\n params = Calculator.read_json_param_objects(json_reform, None)\n assert isinstance(params, dict)\n pol = Policy()\n pol.ignore_reform_errors()\n pol.implement_reform(params['policy'],\n print_warnings=False, raise_errors=False)\n assert len(pol.parameter_errors) > 0\n assert len(pol.parameter_warnings) > 0\n\n\ndef test_noreform_documentation():\n reform_json = \"\"\"\n {\n \"policy\": {}\n }\n \"\"\"\n assump_json = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n \"growdiff_baseline\": {},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n params = Calculator.read_json_param_objects(reform_json, assump_json)\n assert isinstance(params, dict)\n actual_doc = Calculator.reform_documentation(params)\n expected_doc = (\n 'REFORM DOCUMENTATION\\n'\n 'Baseline Growth-Difference Assumption Values by Year:\\n'\n 'none: using default baseline growth assumptions\\n'\n 'Policy Reform Parameter Values by Year:\\n'\n 'none: using current-law policy parameters\\n'\n )\n assert actual_doc == expected_doc\n\n\ndef test_reform_documentation():\n reform_json = \"\"\"\n {\n \"policy\": {\n \"_II_em_cpi\": {\"2016\": false,\n \"2018\": true},\n \"_II_em\": {\"2016\": [5000],\n \"2018\": [6000],\n \"2020\": [7000]},\n \"_EITC_indiv\": {\"2017\": [true]},\n \"_STD_Aged_cpi\": {\"2016\": false},\n \"_STD_Aged\": {\"2016\": [[1600, 1300, 1300, 1600, 1600]],\n \"2020\": [[2000, 2000, 2000, 2000, 2000]]},\n \"_ID_BenefitCap_Switch_medical\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_casualty\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_misc\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_interest\": {\"2020\": [false]},\n \"_ID_BenefitCap_Switch_charity\": {\"2020\": [false]}\n }\n }\n \"\"\"\n assump_json = \"\"\"\n {\n \"consumption\": {},\n \"behavior\": {},\n // increase baseline inflation rate by one percentage point in 2014+\n // (has no effect on known policy parameter values)\n \"growdiff_baseline\": {\"_ACPIU\": {\"2014\": [0.01]}},\n \"growdiff_response\": {},\n \"growmodel\": {}\n }\n \"\"\"\n params = Calculator.read_json_param_objects(reform_json, assump_json)\n assert isinstance(params, dict)\n doc = Calculator.reform_documentation(params)\n assert isinstance(doc, six.string_types)\n dump = False # set to True to print documentation and force test failure\n if dump:\n print(doc)\n assert 1 == 2\n\n\ndef test_distribution_tables(cps_subsample):\n pol = Policy()\n recs = Records.cps_constructor(data=cps_subsample)\n calc1 = Calculator(policy=pol, records=recs)\n assert calc1.current_year == 2014\n calc1.calc_all()\n dt1, dt2 = calc1.distribution_tables(None, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert dt2 is None\n dt1, dt2 = calc1.distribution_tables(calc1, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert isinstance(dt2, pd.DataFrame)\n reform = {2014: {'_UBI_u18': [1000],\n '_UBI_1820': [1000],\n '_UBI_21': [1000]}}\n pol.implement_reform(reform)\n assert not pol.parameter_errors\n calc2 = Calculator(policy=pol, records=recs)\n calc2.calc_all()\n dt1, dt2 
= calc1.distribution_tables(calc2, 'weighted_deciles')\n assert isinstance(dt1, pd.DataFrame)\n assert isinstance(dt2, pd.DataFrame)\n\n\ndef test_difference_table(cps_subsample):\n cyr = 2014\n pol = Policy()\n recs = Records.cps_constructor(data=cps_subsample)\n calc1 = Calculator(policy=pol, records=recs)\n assert calc1.current_year == cyr\n reform = {cyr: {'_SS_Earnings_c': [9e99]}}\n pol.implement_reform(reform)\n calc2 = Calculator(policy=pol, records=recs)\n assert calc2.current_year == cyr\n calc1.calc_all()\n calc2.calc_all()\n diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')\n assert isinstance(diff, pd.DataFrame)\n\n\ndef test_diagnostic_table(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n adt = calc.diagnostic_table(3)\n assert isinstance(adt, pd.DataFrame)\n\n\ndef test_mtr_graph(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n fig = calc.mtr_graph(calc,\n mars=2,\n income_measure='wages',\n mtr_measure='ptax')\n assert fig\n fig = calc.mtr_graph(calc,\n income_measure='agi',\n mtr_measure='itax')\n assert fig\n\n\ndef test_atr_graph(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n fig = calc.atr_graph(calc, mars=2, atr_measure='itax')\n assert fig\n fig = calc.atr_graph(calc, atr_measure='ptax')\n assert fig\n\n\ndef test_privacy_of_embedded_objects(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n with pytest.raises(AttributeError):\n cyr = calc.__policy.current_year\n with pytest.raises(AttributeError):\n wgh = calc.__records.s006\n with pytest.raises(AttributeError):\n cyr = calc.__consumption.current_year\n with pytest.raises(AttributeError):\n cyr = calc.__behavior.current_year\n\n\ndef test_n65(cps_subsample):\n recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)\n calc = Calculator(policy=Policy(), records=recs)\n assert calc.n65().sum() > 1500\n", "\"\"\"\nTax-Calculator federal tax Calculator class.\n\"\"\"\n# CODING-STYLE CHECKS:\n# pycodestyle calculate.py\n# pylint --disable=locally-disabled calculate.py\n#\n# pylint: disable=invalid-name,no-value-for-parameter,too-many-lines\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport json\nimport re\nimport copy\nimport six\nimport numpy as np\nimport pandas as pd\nfrom taxcalc.functions import (TaxInc, SchXYZTax, GainsTax, AGIsurtax,\n NetInvIncTax, AMT, EI_PayrollTax, Adj,\n DependentCare, ALD_InvInc_ec_base, CapGains,\n SSBenefits, UBI, AGI, ItemDedCap, ItemDed,\n StdDed, AdditionalMedicareTax, F2441, EITC,\n ChildDepTaxCredit, AdditionalCTC, CTC_new,\n PersonalTaxCredit, SchR,\n AmOppCreditParts, EducationTaxCredit,\n NonrefundableCredits, C1040, IITAX,\n BenefitSurtax, BenefitLimitation,\n FairShareTax, LumpSumTax, BenefitPrograms,\n ExpandIncome, AfterTaxIncome)\nfrom taxcalc.policy import Policy\nfrom taxcalc.records import Records\nfrom taxcalc.consumption import Consumption\nfrom taxcalc.behavior import Behavior\nfrom taxcalc.growdiff import GrowDiff\nfrom taxcalc.growfactors import GrowFactors\nfrom taxcalc.utils import (DIST_VARIABLES, create_distribution_table,\n DIFF_VARIABLES, create_difference_table,\n create_diagnostic_table,\n ce_aftertax_expanded_income,\n mtr_graph_data, atr_graph_data, 
xtr_graph_plot,\n dec_graph_data, dec_graph_plot,\n pch_graph_data, pch_graph_plot)\n# import pdb\n\n\nclass Calculator(object):\n \"\"\"\n Constructor for the Calculator class.\n\n Parameters\n ----------\n policy: Policy class object\n this argument must be specified and object is copied for internal use\n\n records: Records class object\n this argument must be specified and object is copied for internal use\n\n verbose: boolean\n specifies whether or not to write to stdout data-loaded and\n data-extrapolated progress reports; default value is true.\n\n sync_years: boolean\n specifies whether or not to synchronize policy year and records year;\n default value is true.\n\n consumption: Consumption class object\n specifies consumption response assumptions used to calculate\n \"effective\" marginal tax rates; default is None, which implies\n no consumption responses assumed in marginal tax rate calculations;\n when argument is an object it is copied for internal use;\n also specifies consumption value of in-kind benefis with no in-kind\n consumption values specified implying consumption value is equal to\n government cost of providing the in-kind benefits\n\n behavior: Behavior class object\n specifies behavioral responses used by Calculator; default is None,\n which implies no behavioral responses to policy reform;\n when argument is an object it is copied for internal use\n\n Raises\n ------\n ValueError:\n if parameters are not the appropriate type.\n\n Returns\n -------\n class instance: Calculator\n\n Notes\n -----\n The most efficient way to specify current-law and reform Calculator\n objects is as follows:\n pol = Policy()\n rec = Records()\n calc1 = Calculator(policy=pol, records=rec) # current-law\n pol.implement_reform(...)\n calc2 = Calculator(policy=pol, records=rec) # reform\n All calculations are done on the internal copies of the Policy and\n Records objects passed to each of the two Calculator constructors.\n \"\"\"\n # pylint: disable=too-many-public-methods\n\n def __init__(self, policy=None, records=None, verbose=True,\n sync_years=True, consumption=None, behavior=None):\n # pylint: disable=too-many-arguments,too-many-branches\n if isinstance(policy, Policy):\n self.__policy = copy.deepcopy(policy)\n else:\n raise ValueError('must specify policy as a Policy object')\n if isinstance(records, Records):\n self.__records = copy.deepcopy(records)\n else:\n raise ValueError('must specify records as a Records object')\n if self.__policy.current_year < self.__records.data_year:\n self.__policy.set_year(self.__records.data_year)\n if consumption is None:\n self.__consumption = Consumption(start_year=policy.start_year)\n elif isinstance(consumption, Consumption):\n self.__consumption = copy.deepcopy(consumption)\n else:\n raise ValueError('consumption must be None or Consumption object')\n if self.__consumption.current_year < self.__policy.current_year:\n self.__consumption.set_year(self.__policy.current_year)\n if behavior is None:\n self.__behavior = Behavior(start_year=policy.start_year)\n elif isinstance(behavior, Behavior):\n self.__behavior = copy.deepcopy(behavior)\n else:\n raise ValueError('behavior must be None or Behavior object')\n if self.__behavior.current_year < self.__policy.current_year:\n self.__behavior.set_year(self.__policy.current_year)\n current_year_is_data_year = (\n self.__records.current_year == self.__records.data_year)\n if sync_years and current_year_is_data_year:\n if verbose:\n print('You loaded data for ' +\n str(self.__records.data_year) + '.')\n 
if self.__records.IGNORED_VARS:\n print('Your data include the following unused ' +\n 'variables that will be ignored:')\n for var in self.__records.IGNORED_VARS:\n print(' ' +\n var)\n while self.__records.current_year < self.__policy.current_year:\n self.__records.increment_year()\n if verbose:\n print('Tax-Calculator startup automatically ' +\n 'extrapolated your data to ' +\n str(self.__records.current_year) + '.')\n if verbose and sys.version_info.major == 2: # running Python 2.7\n print( # pragma: no cover\n ('WARNING: Tax-Calculator packages for Python 2.7 will\\n'\n ' no longer be provided beginning in 2019\\n'\n ' because Pandas is stopping development for 2.7\\n'\n 'SOLUTION: upgrade to Python 3.6 now')\n )\n assert self.__policy.current_year == self.__records.current_year\n self.__stored_records = None\n\n def increment_year(self):\n \"\"\"\n Advance all embedded objects to next year.\n \"\"\"\n next_year = self.__policy.current_year + 1\n self.__records.increment_year()\n self.__policy.set_year(next_year)\n self.__consumption.set_year(next_year)\n self.__behavior.set_year(next_year)\n\n def advance_to_year(self, year):\n \"\"\"\n The advance_to_year function gives an optional way of implementing\n increment year functionality by immediately specifying the year\n as input. New year must be at least the current year.\n \"\"\"\n iteration = year - self.current_year\n if iteration < 0:\n raise ValueError('New current year must be ' +\n 'greater than current year!')\n for _ in range(iteration):\n self.increment_year()\n assert self.current_year == year\n\n def calc_all(self, zero_out_calc_vars=False):\n \"\"\"\n Call all tax-calculation functions for the current_year.\n \"\"\"\n # conducts static analysis of Calculator object for current_year\n assert self.__records.current_year == self.__policy.current_year\n BenefitPrograms(self)\n self._calc_one_year(zero_out_calc_vars)\n BenefitSurtax(self)\n BenefitLimitation(self)\n FairShareTax(self.__policy, self.__records)\n LumpSumTax(self.__policy, self.__records)\n ExpandIncome(self.__policy, self.__records)\n AfterTaxIncome(self.__policy, self.__records)\n\n def weighted_total(self, variable_name):\n \"\"\"\n Return all-filing-unit weighted total of named Records variable.\n \"\"\"\n return (self.array(variable_name) * self.array('s006')).sum()\n\n def total_weight(self):\n \"\"\"\n Return all-filing-unit total of sampling weights.\n NOTE: var_weighted_mean = calc.weighted_total(var)/calc.total_weight()\n \"\"\"\n return self.array('s006').sum()\n\n def dataframe(self, variable_list):\n \"\"\"\n Return pandas DataFrame containing the listed variables from embedded\n Records object.\n \"\"\"\n assert isinstance(variable_list, list)\n arys = [self.array(vname) for vname in variable_list]\n pdf = pd.DataFrame(data=np.column_stack(arys), columns=variable_list)\n del arys\n return pdf\n\n def distribution_table_dataframe(self):\n \"\"\"\n Return pandas DataFrame containing the DIST_TABLE_COLUMNS variables\n from embedded Records object.\n \"\"\"\n pdf = self.dataframe(DIST_VARIABLES)\n # weighted count of itemized-deduction returns\n pdf['num_returns_ItemDed'] = pdf['s006'].where(\n pdf['c04470'] > 0., 0.)\n # weighted count of standard-deduction returns\n pdf['num_returns_StandardDed'] = pdf['s006'].where(\n pdf['standard'] > 0., 0.)\n # weight count of returns with positive Alternative Minimum Tax (AMT)\n pdf['num_returns_AMT'] = pdf['s006'].where(\n pdf['c09600'] > 0., 0.)\n return pdf\n\n def array(self, variable_name, 
variable_value=None):\n \"\"\"\n If variable_value is None, return numpy ndarray containing the\n named variable in embedded Records object.\n If variable_value is not None, set named variable in embedded Records\n object to specified variable_value and return None (which can be\n ignored).\n \"\"\"\n if variable_value is None:\n return getattr(self.__records, variable_name)\n assert isinstance(variable_value, np.ndarray)\n setattr(self.__records, variable_name, variable_value)\n return None\n\n def n65(self):\n \"\"\"\n Return numpy ndarray containing the number of\n individuals age 65+ in each filing unit.\n \"\"\"\n vdf = self.dataframe(['age_head', 'age_spouse', 'elderly_dependents'])\n return ((vdf['age_head'] >= 65).astype(int) +\n (vdf['age_spouse'] >= 65).astype(int) +\n vdf['elderly_dependents'])\n\n def incarray(self, variable_name, variable_add):\n \"\"\"\n Add variable_add to named variable in embedded Records object.\n \"\"\"\n assert isinstance(variable_add, np.ndarray)\n setattr(self.__records, variable_name,\n self.array(variable_name) + variable_add)\n\n def zeroarray(self, variable_name):\n \"\"\"\n Set named variable in embedded Records object to zeros.\n \"\"\"\n setattr(self.__records, variable_name, np.zeros(self.array_len))\n\n def store_records(self):\n \"\"\"\n Make internal copy of embedded Records object that can then be\n restored after interim calculations that make temporary changes\n to the embedded Records object.\n \"\"\"\n assert self.__stored_records is None\n self.__stored_records = copy.deepcopy(self.__records)\n\n def restore_records(self):\n \"\"\"\n Set the embedded Records object to the stored Records object\n that was saved in the last call to the store_records() method.\n \"\"\"\n assert isinstance(self.__stored_records, Records)\n self.__records = copy.deepcopy(self.__stored_records)\n del self.__stored_records\n self.__stored_records = None\n\n def records_current_year(self, year=None):\n \"\"\"\n If year is None, return current_year of embedded Records object.\n If year is not None, set embedded Records current_year to year and\n return None (which can be ignored).\n \"\"\"\n if year is None:\n return self.__records.current_year\n assert isinstance(year, int)\n self.__records.set_current_year(year)\n return None\n\n @property\n def array_len(self):\n \"\"\"\n Length of arrays in embedded Records object.\n \"\"\"\n return self.__records.array_length\n\n def policy_param(self, param_name, param_value=None):\n \"\"\"\n If param_value is None, return named parameter in\n embedded Policy object.\n If param_value is not None, set named parameter in\n embedded Policy object to specified param_value and\n return None (which can be ignored).\n \"\"\"\n if param_value is None:\n return getattr(self.__policy, param_name)\n setattr(self.__policy, param_name, param_value)\n return None\n\n def consump_param(self, param_name):\n \"\"\"\n Return value of named parameter in embedded Consumption object.\n \"\"\"\n return getattr(self.__consumption, param_name)\n\n def consump_benval_params(self):\n \"\"\"\n Return list of benefit-consumption-value parameter values\n in embedded Consumption object.\n \"\"\"\n return self.__consumption.benval_params()\n\n def behavior_has_response(self):\n \"\"\"\n Return True if embedded Behavior object has response;\n otherwise return False.\n \"\"\"\n return self.__behavior.has_response()\n\n def behavior(self, param_name, param_value=None):\n \"\"\"\n If param_value is None, return named parameter in\n embedded Behavior 
object.\n If param_value is not None, set named parameter in\n embedded Behavior object to specified param_value and\n return None (which can be ignored).\n \"\"\"\n if param_value is None:\n return getattr(self.__behavior, param_name)\n setattr(self.__behavior, param_name, param_value)\n return None\n\n def records_include_behavioral_responses(self):\n \"\"\"\n Mark embedded Records object as including behavioral responses\n \"\"\"\n self.__records.behavioral_responses_are_included = True\n\n @property\n def reform_warnings(self):\n \"\"\"\n Calculator class embedded Policy object's reform_warnings.\n \"\"\"\n return self.__policy.parameter_warnings\n\n def policy_current_year(self, year=None):\n \"\"\"\n If year is None, return current_year of embedded Policy object.\n If year is not None, set embedded Policy current_year to year and\n return None (which can be ignored).\n \"\"\"\n if year is None:\n return self.__policy.current_year\n assert isinstance(year, int)\n self.__policy.set_year(year)\n return None\n\n @property\n def current_year(self):\n \"\"\"\n Calculator class current calendar year property.\n \"\"\"\n return self.__policy.current_year\n\n @property\n def data_year(self):\n \"\"\"\n Calculator class initial (i.e., first) records data year property.\n \"\"\"\n return self.__records.data_year\n\n def diagnostic_table(self, num_years):\n \"\"\"\n Generate multi-year diagnostic table containing aggregate statistics;\n this method leaves the Calculator object unchanged.\n\n Parameters\n ----------\n num_years : Integer\n number of years to include in diagnostic table starting\n with the Calculator object's current_year (must be at least\n one and no more than what would exceed Policy end_year)\n\n Returns\n -------\n Pandas DataFrame object containing the multi-year diagnostic table\n \"\"\"\n assert num_years >= 1\n max_num_years = self.__policy.end_year - self.__policy.current_year + 1\n assert num_years <= max_num_years\n diag_variables = DIST_VARIABLES + ['surtax']\n calc = copy.deepcopy(self)\n tlist = list()\n for iyr in range(1, num_years + 1):\n assert calc.behavior_has_response() is False\n calc.calc_all()\n diag = create_diagnostic_table(calc.dataframe(diag_variables),\n calc.current_year)\n tlist.append(diag)\n if iyr < num_years:\n calc.increment_year()\n del diag_variables\n del calc\n del diag\n return pd.concat(tlist, axis=1)\n\n def distribution_tables(self, calc, groupby):\n \"\"\"\n Get results from self and calc, sort them by expanded_income into\n table rows defined by groupby, compute grouped statistics, and\n return tables as a pair of Pandas dataframes.\n This method leaves the Calculator object(s) unchanged.\n Note that the returned tables have consistent income groups (based\n on the self expanded_income) even though the baseline expanded_income\n in self and the reform expanded_income in calc are different.\n\n Parameters\n ----------\n calc : Calculator object or None\n typically represents the reform while self represents the baseline;\n if calc is None, the second returned table is None\n\n groupby : String object\n options for input: 'weighted_deciles', 'standard_income_bins'\n determines how the columns in resulting Pandas DataFrame are sorted\n\n Return and typical usage\n ------------------------\n dist1, dist2 = calc1.distribution_tables(calc2, 'weighted_deciles')\n OR\n dist1, _ = calc1.distribution_tables(None, 'weighted_deciles')\n (where calc1 is a baseline Calculator object\n and calc2 is a reform Calculator object).\n Each of the dist1 
and optional dist2 is a distribution table as a\n Pandas DataFrame with DIST_TABLE_COLUMNS and groupby rows.\n NOTE: when groupby is 'weighted_deciles', the returned tables have 3\n extra rows containing top-decile detail consisting of statistics\n for the 0.90-0.95 quantile range (bottom half of top decile),\n for the 0.95-0.99 quantile range, and\n for the 0.99-1.00 quantile range (top one percent); and the\n returned table splits the bottom decile into filing units with\n negative (denoted by a 0-10n row label),\n zero (denoted by a 0-10z row label), and\n positive (denoted by a 0-10p row label) values of the\n specified income_measure.\n \"\"\"\n # nested function used only by this method\n def have_same_income_measure(calc1, calc2):\n \"\"\"\n Return true if calc1 and calc2 contain the same expanded_income;\n otherwise, return false. (Note that \"same\" means nobody's\n expanded_income differs by more than one cent.)\n \"\"\"\n im1 = calc1.array('expanded_income')\n im2 = calc2.array('expanded_income')\n return np.allclose(im1, im2, rtol=0.0, atol=0.01)\n # main logic of method\n assert calc is None or isinstance(calc, Calculator)\n assert (groupby == 'weighted_deciles' or\n groupby == 'standard_income_bins')\n if calc is not None:\n assert np.allclose(self.array('s006'),\n calc.array('s006')) # check rows in same order\n var_dataframe = self.distribution_table_dataframe()\n imeasure = 'expanded_income'\n dt1 = create_distribution_table(var_dataframe, groupby, imeasure)\n del var_dataframe\n if calc is None:\n dt2 = None\n else:\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n assert np.allclose(self.consump_benval_params(),\n calc.consump_benval_params())\n var_dataframe = calc.distribution_table_dataframe()\n if have_same_income_measure(self, calc):\n imeasure = 'expanded_income'\n else:\n imeasure = 'expanded_income_baseline'\n var_dataframe[imeasure] = self.array('expanded_income')\n dt2 = create_distribution_table(var_dataframe, groupby, imeasure)\n del var_dataframe\n return (dt1, dt2)\n\n def difference_table(self, calc, groupby, tax_to_diff):\n \"\"\"\n Get results from self and calc, sort them by expanded_income into\n table rows defined by groupby, compute grouped statistics, and\n return tax-difference table as a Pandas dataframe.\n This method leaves the Calculator objects unchanged.\n Note that the returned tables have consistent income groups (based\n on the self expanded_income) even though the baseline expanded_income\n in self and the reform expanded_income in calc are different.\n\n Parameters\n ----------\n calc : Calculator object\n calc represents the reform while self represents the baseline\n\n groupby : String object\n options for input: 'weighted_deciles', 'standard_income_bins'\n determines how the columns in resulting Pandas DataFrame are sorted\n\n tax_to_diff : String object\n options for input: 'iitax', 'payrolltax', 'combined'\n specifies which tax to difference\n\n Returns and typical usage\n -------------------------\n diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')\n (where calc1 is a baseline Calculator object\n and calc2 is a reform Calculator object).\n The returned diff is a difference table as a Pandas DataFrame\n with DIST_TABLE_COLUMNS and groupby rows.\n NOTE: when groupby is 'weighted_deciles', the returned table has three\n extra rows containing top-decile detail consisting of statistics\n for the 0.90-0.95 quantile range (bottom half of top decile),\n for the 0.95-0.99 quantile 
range, and\n for the 0.99-1.00 quantile range (top one percent); and the\n returned table splits the bottom decile into filing units with\n negative (denoted by a 0-10n row label),\n zero (denoted by a 0-10z row label), and\n positive (denoted by a 0-10p row label) values of the\n specified income_measure.\n \"\"\"\n assert isinstance(calc, Calculator)\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n assert np.allclose(self.consump_benval_params(),\n calc.consump_benval_params())\n self_var_dataframe = self.dataframe(DIFF_VARIABLES)\n calc_var_dataframe = calc.dataframe(DIFF_VARIABLES)\n diff = create_difference_table(self_var_dataframe,\n calc_var_dataframe,\n groupby, tax_to_diff)\n del self_var_dataframe\n del calc_var_dataframe\n return diff\n\n MTR_VALID_VARIABLES = ['e00200p', 'e00200s',\n 'e00900p', 'e00300',\n 'e00400', 'e00600',\n 'e00650', 'e01400',\n 'e01700', 'e02000',\n 'e02400', 'p22250',\n 'p23250', 'e18500',\n 'e19200', 'e26270',\n 'e19800', 'e20100']\n\n def mtr(self, variable_str='e00200p',\n negative_finite_diff=False,\n zero_out_calculated_vars=False,\n calc_all_already_called=False,\n wrt_full_compensation=True):\n \"\"\"\n Calculates the marginal payroll, individual income, and combined\n tax rates for every tax filing unit, leaving the Calculator object\n in exactly the same state as it would be in after a calc_all() call.\n\n The marginal tax rates are approximated as the change in tax\n liability caused by a small increase (the finite_diff) in the variable\n specified by the variable_str divided by that small increase in the\n variable, when wrt_full_compensation is false.\n\n If wrt_full_compensation is true, then the marginal tax rates\n are computed as the change in tax liability divided by the change\n in total compensation caused by the small increase in the variable\n (where the change in total compensation is the sum of the small\n increase in the variable and any increase in the employer share of\n payroll taxes caused by the small increase in the variable).\n\n If using 'e00200s' as variable_str, the marginal tax rate for all\n records where MARS != 2 will be missing. If you want to perform a\n function such as np.mean() on the returned arrays, you will need to\n account for this.\n\n Parameters\n ----------\n variable_str: string\n specifies type of income or expense that is increased to compute\n the marginal tax rates. 
See Notes for list of valid variables.\n\n negative_finite_diff: boolean\n specifies whether or not marginal tax rates are computed by\n subtracting (rather than adding) a small finite_diff amount\n to the specified variable.\n\n zero_out_calculated_vars: boolean\n specifies value of zero_out_calc_vars parameter used in calls\n of Calculator.calc_all() method.\n\n calc_all_already_called: boolean\n specifies whether self has already had its Calculor.calc_all()\n method called, in which case this method will not do a final\n calc_all() call but use the incoming embedded Records object\n as the outgoing Records object embedding in self.\n\n wrt_full_compensation: boolean\n specifies whether or not marginal tax rates on earned income\n are computed with respect to (wrt) changes in total compensation\n that includes the employer share of OASDI and HI payroll taxes.\n\n Returns\n -------\n A tuple of numpy arrays in the following order:\n mtr_payrolltax: an array of marginal payroll tax rates.\n mtr_incometax: an array of marginal individual income tax rates.\n mtr_combined: an array of marginal combined tax rates, which is\n the sum of mtr_payrolltax and mtr_incometax.\n\n Notes\n -----\n The arguments zero_out_calculated_vars and calc_all_already_called\n cannot both be true.\n\n Valid variable_str values are:\n 'e00200p', taxpayer wage/salary earnings (also included in e00200);\n 'e00200s', spouse wage/salary earnings (also included in e00200);\n 'e00900p', taxpayer Schedule C self-employment income (also in e00900);\n 'e00300', taxable interest income;\n 'e00400', federally-tax-exempt interest income;\n 'e00600', all dividends included in AGI\n 'e00650', qualified dividends (also included in e00600)\n 'e01400', federally-taxable IRA distribution;\n 'e01700', federally-taxable pension benefits;\n 'e02000', Schedule E total net income/loss\n 'e02400', all social security (OASDI) benefits;\n 'p22250', short-term capital gains;\n 'p23250', long-term capital gains;\n 'e18500', Schedule A real-estate-tax paid;\n 'e19200', Schedule A interest paid;\n 'e26270', S-corporation/partnership income (also included in e02000);\n 'e19800', Charity cash contributions;\n 'e20100', Charity non-cash contributions.\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-statements\n # pylint: disable=too-many-locals,too-many-branches\n assert not zero_out_calculated_vars or not calc_all_already_called\n # check validity of variable_str parameter\n if variable_str not in Calculator.MTR_VALID_VARIABLES:\n msg = 'mtr variable_str=\"{}\" is not valid'\n raise ValueError(msg.format(variable_str))\n # specify value for finite_diff parameter\n finite_diff = 0.01 # a one-cent difference\n if negative_finite_diff:\n finite_diff *= -1.0\n # remember records object in order to restore it after mtr computations\n self.store_records()\n # extract variable array(s) from embedded records object\n variable = self.array(variable_str)\n if variable_str == 'e00200p':\n earnings_var = self.array('e00200')\n elif variable_str == 'e00200s':\n earnings_var = self.array('e00200')\n elif variable_str == 'e00900p':\n seincome_var = self.array('e00900')\n elif variable_str == 'e00650':\n divincome_var = self.array('e00600')\n elif variable_str == 'e26270':\n schEincome_var = self.array('e02000')\n # calculate level of taxes after a marginal increase in income\n self.array(variable_str, variable + finite_diff)\n if variable_str == 'e00200p':\n self.array('e00200', earnings_var + finite_diff)\n elif variable_str == 'e00200s':\n 
self.array('e00200', earnings_var + finite_diff)\n elif variable_str == 'e00900p':\n self.array('e00900', seincome_var + finite_diff)\n elif variable_str == 'e00650':\n self.array('e00600', divincome_var + finite_diff)\n elif variable_str == 'e26270':\n self.array('e02000', schEincome_var + finite_diff)\n if self.__consumption.has_response():\n self.__consumption.response(self.__records, finite_diff)\n self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)\n payrolltax_chng = self.array('payrolltax')\n incometax_chng = self.array('iitax')\n combined_taxes_chng = incometax_chng + payrolltax_chng\n # calculate base level of taxes after restoring records object\n self.restore_records()\n if not calc_all_already_called or zero_out_calculated_vars:\n self.calc_all(zero_out_calc_vars=zero_out_calculated_vars)\n payrolltax_base = self.array('payrolltax')\n incometax_base = self.array('iitax')\n combined_taxes_base = incometax_base + payrolltax_base\n # compute marginal changes in combined tax liability\n payrolltax_diff = payrolltax_chng - payrolltax_base\n incometax_diff = incometax_chng - incometax_base\n combined_diff = combined_taxes_chng - combined_taxes_base\n # specify optional adjustment for employer (er) OASDI+HI payroll taxes\n mtr_on_earnings = (variable_str == 'e00200p' or\n variable_str == 'e00200s')\n if wrt_full_compensation and mtr_on_earnings:\n adj = np.where(variable < self.policy_param('SS_Earnings_c'),\n 0.5 * (self.policy_param('FICA_ss_trt') +\n self.policy_param('FICA_mc_trt')),\n 0.5 * self.policy_param('FICA_mc_trt'))\n else:\n adj = 0.0\n # compute marginal tax rates\n mtr_payrolltax = payrolltax_diff / (finite_diff * (1.0 + adj))\n mtr_incometax = incometax_diff / (finite_diff * (1.0 + adj))\n mtr_combined = combined_diff / (finite_diff * (1.0 + adj))\n # if variable_str is e00200s, set MTR to NaN for units without a spouse\n if variable_str == 'e00200s':\n mars = self.array('MARS')\n mtr_payrolltax = np.where(mars == 2, mtr_payrolltax, np.nan)\n mtr_incometax = np.where(mars == 2, mtr_incometax, np.nan)\n mtr_combined = np.where(mars == 2, mtr_combined, np.nan)\n # delete intermediate variables\n del variable\n if variable_str == 'e00200p' or variable_str == 'e00200s':\n del earnings_var\n elif variable_str == 'e00900p':\n del seincome_var\n elif variable_str == 'e00650':\n del divincome_var\n elif variable_str == 'e26270':\n del schEincome_var\n del payrolltax_chng\n del incometax_chng\n del combined_taxes_chng\n del payrolltax_base\n del incometax_base\n del combined_taxes_base\n del payrolltax_diff\n del incometax_diff\n del combined_diff\n del adj\n # return the three marginal tax rate arrays\n return (mtr_payrolltax, mtr_incometax, mtr_combined)\n\n def mtr_graph(self, calc,\n mars='ALL',\n mtr_measure='combined',\n mtr_variable='e00200p',\n alt_e00200p_text='',\n mtr_wrt_full_compen=False,\n income_measure='expanded_income',\n dollar_weighting=False):\n \"\"\"\n Create marginal tax rate graph that can be written to an HTML\n file (using the write_graph_file utility function) or shown on\n the screen immediately in an interactive or notebook session\n (following the instructions in the documentation of the\n xtr_graph_plot utility function).\n\n Parameters\n ----------\n calc : Calculator object\n calc represents the reform while self represents the baseline\n\n mars : integer or string\n specifies which filing status subgroup to show in the graph\n\n - 'ALL': include all filing units in sample\n\n - 1: include only single filing units\n\n - 2: include only 
married-filing-jointly filing units\n\n - 3: include only married-filing-separately filing units\n\n - 4: include only head-of-household filing units\n\n mtr_measure : string\n specifies which marginal tax rate to show on graph's y axis\n\n - 'itax': marginal individual income tax rate\n\n - 'ptax': marginal payroll tax rate\n\n - 'combined': sum of marginal income and payroll tax rates\n\n mtr_variable : string\n any string in the Calculator.VALID_MTR_VARS set\n specifies variable to change in order to compute marginal tax rates\n\n alt_e00200p_text : string\n text to use in place of mtr_variable\n when mtr_variable is 'e00200p';\n if empty string then use 'e00200p'\n\n mtr_wrt_full_compen : boolean\n see documentation of Calculator.mtr()\n argument wrt_full_compensation\n (value has an effect only if mtr_variable is 'e00200p')\n\n income_measure : string\n specifies which income variable to show on the graph's x axis\n\n - 'wages': wage and salary income (e00200)\n\n - 'agi': adjusted gross income, AGI (c00100)\n\n - 'expanded_income': broader than AGI (see definition in\n functions.py file).\n\n dollar_weighting : boolean\n False implies both income_measure percentiles on x axis\n and mtr values for each percentile on the y axis are\n computed without using dollar income_measure weights (just\n sampling weights); True implies both income_measure\n percentiles on x axis and mtr values for each percentile\n on the y axis are computed using dollar income_measure\n weights (in addition to sampling weights). Specifying\n True produces a graph x axis that shows income_measure\n (not filing unit) percentiles.\n\n Returns\n -------\n graph that is a bokeh.plotting figure object\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-locals\n # check that two Calculator objects are comparable\n assert isinstance(calc, Calculator)\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n # check validity of mars parameter\n assert mars == 'ALL' or (mars >= 1 and mars <= 4)\n # check validity of income_measure\n assert (income_measure == 'expanded_income' or\n income_measure == 'agi' or\n income_measure == 'wages')\n if income_measure == 'expanded_income':\n income_variable = 'expanded_income'\n elif income_measure == 'agi':\n income_variable = 'c00100'\n elif income_measure == 'wages':\n income_variable = 'e00200'\n # check validity of mtr_measure parameter\n assert (mtr_measure == 'combined' or\n mtr_measure == 'itax' or\n mtr_measure == 'ptax')\n # calculate marginal tax rates\n (mtr1_ptax, mtr1_itax,\n mtr1_combined) = self.mtr(variable_str=mtr_variable,\n wrt_full_compensation=mtr_wrt_full_compen)\n (mtr2_ptax, mtr2_itax,\n mtr2_combined) = calc.mtr(variable_str=mtr_variable,\n wrt_full_compensation=mtr_wrt_full_compen)\n if mtr_measure == 'combined':\n mtr1 = mtr1_combined\n mtr2 = mtr2_combined\n elif mtr_measure == 'itax':\n mtr1 = mtr1_itax\n mtr2 = mtr2_itax\n elif mtr_measure == 'ptax':\n mtr1 = mtr1_ptax\n mtr2 = mtr2_ptax\n # extract datafames needed by mtr_graph_data utility function\n record_variables = ['s006']\n if mars != 'ALL':\n record_variables.append('MARS')\n record_variables.append(income_variable)\n vdf = self.dataframe(record_variables)\n vdf['mtr1'] = mtr1\n vdf['mtr2'] = mtr2\n # select filing-status subgroup, if any\n if mars != 'ALL':\n vdf = vdf[vdf['MARS'] == mars]\n # construct data for graph\n data = mtr_graph_data(vdf,\n year=self.current_year,\n mars=mars,\n mtr_measure=mtr_measure,\n alt_e00200p_text=alt_e00200p_text,\n 
mtr_wrt_full_compen=mtr_wrt_full_compen,\n income_measure=income_measure,\n dollar_weighting=dollar_weighting)\n # delete intermediate variables\n del vdf\n del mtr1_ptax\n del mtr1_itax\n del mtr1_combined\n del mtr1\n del mtr2_ptax\n del mtr2_itax\n del mtr2_combined\n del mtr2\n del record_variables\n # construct figure from data\n fig = xtr_graph_plot(data,\n width=850,\n height=500,\n xlabel='',\n ylabel='',\n title='',\n legendloc='bottom_right')\n del data\n return fig\n\n def atr_graph(self, calc,\n mars='ALL',\n atr_measure='combined'):\n \"\"\"\n Create average tax rate graph that can be written to an HTML\n file (using the write_graph_file utility function) or shown on\n the screen immediately in an interactive or notebook session\n (following the instructions in the documentation of the\n xtr_graph_plot utility function). The graph shows the mean\n average tax rate for each expanded-income percentile excluding\n any percentile that includes a filing unit with negative or\n zero basline (self) expanded income.\n\n Parameters\n ----------\n calc : Calculator object\n calc represents the reform while self represents the baseline,\n where both self and calc have calculated taxes for this year\n before being used by this method\n\n mars : integer or string\n specifies which filing status subgroup to show in the graph\n\n - 'ALL': include all filing units in sample\n\n - 1: include only single filing units\n\n - 2: include only married-filing-jointly filing units\n\n - 3: include only married-filing-separately filing units\n\n - 4: include only head-of-household filing units\n\n atr_measure : string\n specifies which average tax rate to show on graph's y axis\n\n - 'itax': average individual income tax rate\n\n - 'ptax': average payroll tax rate\n\n - 'combined': sum of average income and payroll tax rates\n\n Returns\n -------\n graph that is a bokeh.plotting figure object\n \"\"\"\n # check that two Calculator objects are comparable\n assert isinstance(calc, Calculator)\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n # check validity of function arguments\n assert mars == 'ALL' or (mars >= 1 and mars <= 4)\n assert (atr_measure == 'combined' or\n atr_measure == 'itax' or\n atr_measure == 'ptax')\n # extract needed output that is assumed unchanged by reform from self\n record_variables = ['s006']\n if mars != 'ALL':\n record_variables.append('MARS')\n record_variables.append('expanded_income')\n vdf = self.dataframe(record_variables)\n # create 'tax1' and 'tax2' columns given specified atr_measure\n if atr_measure == 'combined':\n vdf['tax1'] = self.array('combined')\n vdf['tax2'] = calc.array('combined')\n elif atr_measure == 'itax':\n vdf['tax1'] = self.array('iitax')\n vdf['tax2'] = calc.array('iitax')\n elif atr_measure == 'ptax':\n vdf['tax1'] = self.array('payrolltax')\n vdf['tax2'] = calc.array('payrolltax')\n # select filing-status subgroup, if any\n if mars != 'ALL':\n vdf = vdf[vdf['MARS'] == mars]\n # construct data for graph\n data = atr_graph_data(vdf,\n year=self.current_year,\n mars=mars,\n atr_measure=atr_measure)\n # delete intermediate variables\n del vdf\n del record_variables\n # construct figure from data\n fig = xtr_graph_plot(data,\n width=850,\n height=500,\n xlabel='',\n ylabel='',\n title='',\n legendloc='bottom_right')\n del data\n return fig\n\n def pch_graph(self, calc):\n \"\"\"\n Create percentage change in after-tax expanded income graph that\n can be written to an HTML file (using the write_graph_file 
utility\n function) or shown on the screen immediately in an interactive or\n notebook session (following the instructions in the documentation\n of the xtr_graph_plot utility function). The graph shows the\n dollar-weighted mean percentage change in after-tax expanded income\n for each expanded-income percentile excluding any percentile that\n includes a filing unit with negative or zero basline (self) expanded\n income.\n\n Parameters\n ----------\n calc : Calculator object\n calc represents the reform while self represents the baseline,\n where both self and calc have calculated taxes for this year\n before being used by this method\n\n Returns\n -------\n graph that is a bokeh.plotting figure object\n \"\"\"\n # check that two Calculator objects are comparable\n assert isinstance(calc, Calculator)\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n # extract needed output from baseline and reform Calculator objects\n vdf1 = self.dataframe(['s006', 'expanded_income', 'aftertax_income'])\n vdf2 = calc.dataframe(['s006', 'aftertax_income'])\n assert np.allclose(vdf1['s006'], vdf2['s006'])\n vdf = pd.DataFrame()\n vdf['s006'] = vdf1['s006']\n vdf['expanded_income'] = vdf1['expanded_income']\n vdf['chg_aftinc'] = vdf2['aftertax_income'] - vdf1['aftertax_income']\n # construct data for graph\n data = pch_graph_data(vdf, year=self.current_year)\n del vdf\n del vdf1\n del vdf2\n # construct figure from data\n fig = pch_graph_plot(data,\n width=850,\n height=500,\n xlabel='',\n ylabel='',\n title='')\n del data\n return fig\n\n def decile_graph(self, calc,\n include_zero_incomes=True,\n include_negative_incomes=True):\n \"\"\"\n Create graph that shows percentage change in aftertax expanded\n income (from going from policy in self to policy in calc) for\n each expanded-income decile and subgroups of the top decile.\n The graph can be written to an HTML file (using the\n write_graph_file utility function) or shown on the screen\n immediately in an interactive or notebook session (following\n the instructions in the documentation of the xtr_graph_plot\n utility function).\n NOTE: this method calls the distribution_tables method to\n compute the values of the graphed statistic; consult\n that method for details on how the values are computed.\n\n Parameters\n ----------\n calc : Calculator object\n calc represents the reform while self represents the baseline,\n where both self and calc have calculated taxes for this year\n before being used by this method\n\n include_zero_incomes : boolean\n if True (which is the default), the bottom decile does contain\n filing units with zero expanded_income;\n if False, the bottom decile does not contain filing units with\n zero expanded_income.\n\n include_negative_incomes : boolean\n if True (which is the default), the bottom decile does contain\n filing units with negative expanded_income;\n if False, the bottom decile does not contain filing units with\n negative expanded_income.\n\n Returns\n -------\n graph that is a bokeh.plotting figure object\n \"\"\"\n # check that two Calculator objects are comparable\n assert isinstance(calc, Calculator)\n assert calc.current_year == self.current_year\n assert calc.array_len == self.array_len\n dt1, dt2 = self.distribution_tables(calc, 'weighted_deciles')\n # construct data for graph\n data = dec_graph_data(\n dt1, dt2, year=self.current_year,\n include_zero_incomes=include_zero_incomes,\n include_negative_incomes=include_negative_incomes)\n # construct figure from data\n fig 
= dec_graph_plot(data,\n width=850,\n height=500,\n xlabel='',\n ylabel='',\n title='')\n del data\n del dt1\n del dt2\n return fig\n\n REQUIRED_REFORM_KEYS = set(['policy'])\n REQUIRED_ASSUMP_KEYS = set(['consumption', 'behavior',\n 'growdiff_baseline', 'growdiff_response',\n 'growmodel'])\n\n @staticmethod\n def read_json_param_objects(reform, assump):\n \"\"\"\n Read JSON reform and assump objects and\n return a single dictionary containing six key:dict pairs:\n 'policy':dict, 'consumption':dict, 'behavior':dict,\n 'growdiff_baseline':dict, 'growdiff_response':dict, and\n 'growmodel':dict.\n\n Note that either of the two function arguments can be None.\n If reform is None, the dict in the 'policy':dict pair is empty.\n If assump is None, the dict in the all the key:dict pairs is empty.\n\n Also note that either of the two function arguments can be strings\n containing a valid JSON string (rather than a filename),\n in which case the file reading is skipped and the appropriate\n read_json_*_text method is called.\n\n The reform file contents or JSON string must be like this:\n {\"policy\": {...}}\n and the assump file contents or JSON string must be like this:\n {\"consumption\": {...},\n \"behavior\": {...},\n \"growdiff_baseline\": {...},\n \"growdiff_response\": {...},\n \"growmodel\": {...}}\n The {...} should be empty like this {} if not specifying a policy\n reform or if not specifying any economic assumptions of that type.\n\n The returned dictionary contains parameter lists (not arrays).\n \"\"\"\n # pylint: disable=too-many-branches\n # first process second assump parameter\n if assump is None:\n cons_dict = dict()\n behv_dict = dict()\n gdiff_base_dict = dict()\n gdiff_resp_dict = dict()\n growmodel_dict = dict()\n elif isinstance(assump, six.string_types):\n if os.path.isfile(assump):\n txt = open(assump, 'r').read()\n else:\n txt = assump\n (cons_dict,\n behv_dict,\n gdiff_base_dict,\n gdiff_resp_dict,\n growmodel_dict) = Calculator._read_json_econ_assump_text(txt)\n else:\n raise ValueError('assump is neither None nor string')\n # next process first reform parameter\n if reform is None:\n rpol_dict = dict()\n elif isinstance(reform, six.string_types):\n if os.path.isfile(reform):\n txt = open(reform, 'r').read()\n else:\n txt = reform\n rpol_dict = (\n Calculator._read_json_policy_reform_text(txt,\n gdiff_base_dict,\n gdiff_resp_dict)\n )\n else:\n raise ValueError('reform is neither None nor string')\n # construct single composite dictionary\n param_dict = dict()\n param_dict['policy'] = rpol_dict\n param_dict['consumption'] = cons_dict\n param_dict['behavior'] = behv_dict\n param_dict['growdiff_baseline'] = gdiff_base_dict\n param_dict['growdiff_response'] = gdiff_resp_dict\n param_dict['growmodel'] = growmodel_dict\n # return the composite dictionary\n return param_dict\n\n @staticmethod\n def reform_documentation(params, policy_dicts=None):\n \"\"\"\n Generate reform documentation.\n\n Parameters\n ----------\n params: dict\n dictionary is structured like dict returned from\n the static Calculator method read_json_param_objects()\n\n policy_dicts : list of dict or None\n each dictionary in list is a params['policy'] dictionary\n representing second and subsequent elements of a compound\n reform; None implies no compound reform with the simple\n reform characterized in the params['policy'] dictionary\n\n Returns\n -------\n doc: String\n the documentation for the policy reform specified in params\n \"\"\"\n # pylint: disable=too-many-statements,too-many-branches\n\n # 
nested function used only in reform_documentation\n def param_doc(years, change, base):\n \"\"\"\n Parameters\n ----------\n years: list of change years\n change: dictionary of parameter changes\n base: Policy or GrowDiff object with baseline values\n syear: parameter start calendar year\n\n Returns\n -------\n doc: String\n \"\"\"\n # pylint: disable=too-many-locals\n\n # nested function used only in param_doc\n def lines(text, num_indent_spaces, max_line_length=77):\n \"\"\"\n Return list of text lines, each one of which is no longer\n than max_line_length, with the second and subsequent lines\n being indented by the number of specified num_indent_spaces;\n each line in the list ends with the '\\n' character\n \"\"\"\n if len(text) < max_line_length:\n # all text fits on one line\n line = text + '\\n'\n return [line]\n # all text does not fix on one line\n first_line = True\n line_list = list()\n words = text.split()\n while words:\n if first_line:\n line = ''\n first_line = False\n else:\n line = ' ' * num_indent_spaces\n while (words and\n (len(words[0]) + len(line)) < max_line_length):\n line += words.pop(0) + ' '\n line = line[:-1] + '\\n'\n line_list.append(line)\n return line_list\n\n # begin main logic of param_doc\n # pylint: disable=too-many-nested-blocks\n assert len(years) == len(change.keys())\n basex = copy.deepcopy(base)\n basevals = getattr(basex, '_vals', None)\n assert isinstance(basevals, dict)\n doc = ''\n for year in years:\n # write year\n basex.set_year(year)\n doc += '{}:\\n'.format(year)\n # write info for each param in year\n for param in sorted(change[year].keys()):\n # ... write param:value line\n pval = change[year][param]\n if isinstance(pval, list):\n pval = pval[0]\n if basevals[param]['boolean_value']:\n if isinstance(pval, list):\n pval = [True if item else\n False for item in pval]\n else:\n pval = bool(pval)\n doc += ' {} : {}\\n'.format(param, pval)\n # ... write optional param-index line\n if isinstance(pval, list):\n pval = basevals[param]['col_label']\n pval = [str(item) for item in pval]\n doc += ' ' * (4 + len(param)) + '{}\\n'.format(pval)\n # ... write name line\n if param.endswith('_cpi'):\n rootparam = param[:-4]\n name = '{} inflation indexing status'.format(rootparam)\n else:\n name = basevals[param]['long_name']\n for line in lines('name: ' + name, 6):\n doc += ' ' + line\n # ... write optional desc line\n if not param.endswith('_cpi'):\n desc = basevals[param]['description']\n for line in lines('desc: ' + desc, 6):\n doc += ' ' + line\n # ... write baseline_value line\n if isinstance(basex, Policy):\n if param.endswith('_cpi'):\n rootparam = param[:-4]\n bval = basevals[rootparam].get('cpi_inflated',\n False)\n else:\n bval = getattr(basex, param[1:], None)\n if isinstance(bval, np.ndarray):\n bval = bval.tolist()\n if basevals[param]['boolean_value']:\n bval = [True if item else\n False for item in bval]\n elif basevals[param]['boolean_value']:\n bval = bool(bval)\n doc += ' baseline_value: {}\\n'.format(bval)\n else: # if basex is GrowDiff object\n # all GrowDiff parameters have zero as default value\n doc += ' baseline_value: 0.0\\n'\n return doc\n\n # begin main logic of reform_documentation\n # create Policy object with pre-reform (i.e., baseline) values\n # ... create gdiff_baseline object\n gdb = GrowDiff()\n gdb.update_growdiff(params['growdiff_baseline'])\n # ... create GrowFactors object that will incorporate gdiff_baseline\n gfactors_clp = GrowFactors()\n gdb.apply_to(gfactors_clp)\n # ... 
create Policy object containing pre-reform parameter values\n clp = Policy(gfactors=gfactors_clp)\n # generate documentation text\n doc = 'REFORM DOCUMENTATION\\n'\n doc += 'Baseline Growth-Difference Assumption Values by Year:\\n'\n years = sorted(params['growdiff_baseline'].keys())\n if years:\n doc += param_doc(years, params['growdiff_baseline'], gdb)\n else:\n doc += 'none: using default baseline growth assumptions\\n'\n doc += 'Policy Reform Parameter Values by Year:\\n'\n years = sorted(params['policy'].keys())\n if years:\n doc += param_doc(years, params['policy'], clp)\n else:\n doc += 'none: using current-law policy parameters\\n'\n if policy_dicts is not None:\n assert isinstance(policy_dicts, list)\n base = clp\n base.implement_reform(params['policy'])\n assert not base.parameter_errors\n for policy_dict in policy_dicts:\n assert isinstance(policy_dict, dict)\n doc += 'Policy Reform Parameter Values by Year:\\n'\n years = sorted(policy_dict.keys())\n doc += param_doc(years, policy_dict, base)\n base.implement_reform(policy_dict)\n assert not base.parameter_errors\n return doc\n\n def ce_aftertax_income(self, calc,\n custom_params=None,\n require_no_agg_tax_change=True):\n \"\"\"\n Return dictionary that contains certainty-equivalent of the\n expected utility of after-tax expanded income computed for\n several constant-relative-risk-aversion parameter values\n for each of two Calculator objects: self, which represents\n the pre-reform situation, and calc, which represents the\n post-reform situation, both of which MUST have had calc_call()\n called before being passed to this function.\n\n IMPORTANT NOTES: These normative welfare calculations are very\n simple. It is assumed that utility is a function of only\n consumption, and that consumption is equal to after-tax\n income. This means that any assumed behavioral responses that\n change work effort will not affect utility via the\n correpsonding change in leisure. And any saving response to\n changes in after-tax income do not affect consumption.\n\n The cmin value is the consumption level below which marginal\n utility is considered to be constant. 
This allows the handling\n of filing units with very low or even negative after-tax expanded\n income in the expected-utility and certainty-equivalent calculations.\n \"\"\"\n # check that calc and self are consistent\n assert isinstance(calc, Calculator)\n assert calc.array_len == self.array_len\n assert calc.current_year == self.current_year\n assert np.allclose(calc.consump_benval_params(),\n self.consump_benval_params())\n # extract data from self and calc\n records_variables = ['s006', 'combined', 'expanded_income']\n df1 = self.dataframe(records_variables)\n df2 = calc.dataframe(records_variables)\n cedict = ce_aftertax_expanded_income(\n df1, df2,\n custom_params=custom_params,\n require_no_agg_tax_change=require_no_agg_tax_change)\n cedict['year'] = self.current_year\n return cedict\n\n # ----- begin private methods of Calculator class -----\n\n def _taxinc_to_amt(self):\n \"\"\"\n Call TaxInc through AMT functions.\n \"\"\"\n TaxInc(self.__policy, self.__records)\n SchXYZTax(self.__policy, self.__records)\n GainsTax(self.__policy, self.__records)\n AGIsurtax(self.__policy, self.__records)\n NetInvIncTax(self.__policy, self.__records)\n AMT(self.__policy, self.__records)\n\n def _calc_one_year(self, zero_out_calc_vars=False):\n \"\"\"\n Call all the functions except those in the calc_all() method.\n \"\"\"\n if zero_out_calc_vars:\n self.__records.zero_out_changing_calculated_vars()\n # pdb.set_trace()\n EI_PayrollTax(self.__policy, self.__records)\n DependentCare(self.__policy, self.__records)\n Adj(self.__policy, self.__records)\n ALD_InvInc_ec_base(self.__policy, self.__records)\n CapGains(self.__policy, self.__records)\n SSBenefits(self.__policy, self.__records)\n UBI(self.__policy, self.__records)\n AGI(self.__policy, self.__records)\n ItemDedCap(self.__policy, self.__records)\n ItemDed(self.__policy, self.__records)\n AdditionalMedicareTax(self.__policy, self.__records)\n StdDed(self.__policy, self.__records)\n # Store calculated standard deduction, calculate\n # taxes with standard deduction, store AMT + Regular Tax\n std = self.array('standard').copy()\n item = self.array('c04470').copy()\n item_no_limit = self.array('c21060').copy()\n item_phaseout = self.array('c21040').copy()\n self.zeroarray('c04470')\n self.zeroarray('c21060')\n self.zeroarray('c21040')\n self._taxinc_to_amt()\n std_taxes = self.array('c05800').copy()\n # Set standard deduction to zero, calculate taxes w/o\n # standard deduction, and store AMT + Regular Tax\n self.zeroarray('standard')\n self.array('c21060', item_no_limit)\n self.array('c21040', item_phaseout)\n self.array('c04470', item)\n self._taxinc_to_amt()\n item_taxes = self.array('c05800').copy()\n # Replace standard deduction with zero where the taxpayer\n # would be better off itemizing\n self.array('standard', np.where(item_taxes < std_taxes,\n 0., std))\n self.array('c04470', np.where(item_taxes < std_taxes,\n item, 0.))\n self.array('c21060', np.where(item_taxes < std_taxes,\n item_no_limit, 0.))\n self.array('c21040', np.where(item_taxes < std_taxes,\n item_phaseout, 0.))\n # Calculate taxes with optimal itemized deduction\n self._taxinc_to_amt()\n F2441(self.__policy, self.__records)\n EITC(self.__policy, self.__records)\n ChildDepTaxCredit(self.__policy, self.__records)\n PersonalTaxCredit(self.__policy, self.__records)\n AmOppCreditParts(self.__policy, self.__records)\n SchR(self.__policy, self.__records)\n EducationTaxCredit(self.__policy, self.__records)\n NonrefundableCredits(self.__policy, self.__records)\n 
AdditionalCTC(self.__policy, self.__records)\n C1040(self.__policy, self.__records)\n CTC_new(self.__policy, self.__records)\n IITAX(self.__policy, self.__records)\n\n @staticmethod\n def _read_json_policy_reform_text(text_string,\n growdiff_baseline_dict,\n growdiff_response_dict):\n \"\"\"\n Strip //-comments from text_string and return 1 dict based on the JSON.\n\n Specified text is JSON with at least 1 high-level key:object pair:\n a \"policy\": {...} pair.\n\n Other keys such as \"consumption\", \"behavior\", \"growdiff_baseline\",\n \"growdiff_response\" or \"growmodel\" will raise a ValueError.\n\n The {...} object may be empty (that is, be {}), or\n may contain one or more pairs with parameter string primary keys\n and string years as secondary keys. See tests/test_calculate.py for\n an extended example of a commented JSON policy reform text\n that can be read by this method.\n\n Returned dictionary prdict has integer years as primary keys and\n string parameters as secondary keys. This returned dictionary is\n suitable as the argument to the Policy implement_reform(prdict) method.\n \"\"\"\n # strip out //-comments without changing line numbers\n json_str = re.sub('//.*', ' ', text_string)\n # convert JSON text into a Python dictionary\n try:\n raw_dict = json.loads(json_str)\n except ValueError as valerr:\n msg = 'Policy reform text below contains invalid JSON:\\n'\n msg += str(valerr) + '\\n'\n msg += 'Above location of the first error may be approximate.\\n'\n msg += 'The invalid JSON reform text is between the lines:\\n'\n bline = 'XX----.----1----.----2----.----3----.----4'\n bline += '----.----5----.----6----.----7'\n msg += bline + '\\n'\n linenum = 0\n for line in json_str.split('\\n'):\n linenum += 1\n msg += '{:02d}{}'.format(linenum, line) + '\\n'\n msg += bline + '\\n'\n raise ValueError(msg)\n # check key contents of dictionary\n actual_keys = set(raw_dict.keys())\n missing_keys = Calculator.REQUIRED_REFORM_KEYS - actual_keys\n if missing_keys:\n msg = 'required key(s) \"{}\" missing from policy reform file'\n raise ValueError(msg.format(missing_keys))\n illegal_keys = actual_keys - Calculator.REQUIRED_REFORM_KEYS\n if illegal_keys:\n msg = 'illegal key(s) \"{}\" in policy reform file'\n raise ValueError(msg.format(illegal_keys))\n # convert raw_dict['policy'] dictionary into prdict\n tdict = Policy.translate_json_reform_suffixes(raw_dict['policy'],\n growdiff_baseline_dict,\n growdiff_response_dict)\n prdict = Calculator._convert_parameter_dict(tdict)\n return prdict\n\n @staticmethod\n def _read_json_econ_assump_text(text_string):\n \"\"\"\n Strip //-comments from text_string and return 5 dict based on the JSON.\n\n Specified text is JSON with at least 5 high-level key:value pairs:\n a \"consumption\": {...} pair,\n a \"behavior\": {...} pair,\n a \"growdiff_baseline\": {...} pair,\n a \"growdiff_response\": {...} pair, and\n a \"growmodel\": {...} pair.\n\n Other keys such as \"policy\" will raise a ValueError.\n\n The {...} object may be empty (that is, be {}), or\n may contain one or more pairs with parameter string primary keys\n and string years as secondary keys. 
See tests/test_calculate.py for\n an extended example of a commented JSON economic assumption text\n that can be read by this method.\n\n Note that an example is shown in the ASSUMP_CONTENTS string in\n the tests/test_calculate.py file.\n\n Returned dictionaries (cons_dict, behv_dict, gdiff_baseline_dict,\n gdiff_respose_dict, growmodel_dict) have integer years as primary\n keys and string parameters as secondary keys.\n\n These returned dictionaries are suitable as the arguments to\n the Consumption.update_consumption(cons_dict) method, or\n the Behavior.update_behavior(behv_dict) method, or\n the GrowDiff.update_growdiff(gdiff_dict) method, or\n the GrowModel.update_growmodel(growmodel_dict) method.\n \"\"\"\n # pylint: disable=too-many-locals\n # strip out //-comments without changing line numbers\n json_str = re.sub('//.*', ' ', text_string)\n # convert JSON text into a Python dictionary\n try:\n raw_dict = json.loads(json_str)\n except ValueError as valerr:\n msg = 'Economic assumption text below contains invalid JSON:\\n'\n msg += str(valerr) + '\\n'\n msg += 'Above location of the first error may be approximate.\\n'\n msg += 'The invalid JSON asssump text is between the lines:\\n'\n bline = 'XX----.----1----.----2----.----3----.----4'\n bline += '----.----5----.----6----.----7'\n msg += bline + '\\n'\n linenum = 0\n for line in json_str.split('\\n'):\n linenum += 1\n msg += '{:02d}{}'.format(linenum, line) + '\\n'\n msg += bline + '\\n'\n raise ValueError(msg)\n # check key contents of dictionary\n actual_keys = set(raw_dict.keys())\n missing_keys = Calculator.REQUIRED_ASSUMP_KEYS - actual_keys\n if missing_keys:\n msg = 'required key(s) \"{}\" missing from economic assumption file'\n raise ValueError(msg.format(missing_keys))\n illegal_keys = actual_keys - Calculator.REQUIRED_ASSUMP_KEYS\n if illegal_keys:\n msg = 'illegal key(s) \"{}\" in economic assumption file'\n raise ValueError(msg.format(illegal_keys))\n # convert the assumption dictionaries in raw_dict\n key = 'consumption'\n cons_dict = Calculator._convert_parameter_dict(raw_dict[key])\n key = 'behavior'\n behv_dict = Calculator._convert_parameter_dict(raw_dict[key])\n key = 'growdiff_baseline'\n gdiff_base_dict = Calculator._convert_parameter_dict(raw_dict[key])\n key = 'growdiff_response'\n gdiff_resp_dict = Calculator._convert_parameter_dict(raw_dict[key])\n key = 'growmodel'\n growmodel_dict = Calculator._convert_parameter_dict(raw_dict[key])\n return (cons_dict, behv_dict, gdiff_base_dict, gdiff_resp_dict,\n growmodel_dict)\n\n @staticmethod\n def _convert_parameter_dict(param_key_dict):\n \"\"\"\n Converts specified param_key_dict into a dictionary whose primary\n keys are calendar years, and hence, is suitable as the argument to\n the Policy.implement_reform() method, or\n the Consumption.update_consumption() method, or\n the Behavior.update_behavior() method, or\n the GrowDiff.update_growdiff() method, or\n the GrowModel.update_growmodel() method.\n\n Specified input dictionary has string parameter primary keys and\n string years as secondary keys.\n\n Returned dictionary has integer years as primary keys and\n string parameters as secondary keys.\n \"\"\"\n # convert year skey strings into integers and\n # optionally convert lists into np.arrays\n year_param = dict()\n for pkey, sdict in param_key_dict.items():\n if not isinstance(pkey, six.string_types):\n msg = 'pkey {} in reform is not a string'\n raise ValueError(msg.format(pkey))\n rdict = dict()\n if not isinstance(sdict, dict):\n msg = 'pkey {} in reform is 
not paired with a dict'\n raise ValueError(msg.format(pkey))\n for skey, val in sdict.items():\n if not isinstance(skey, six.string_types):\n msg = 'skey {} in reform is not a string'\n raise ValueError(msg.format(skey))\n else:\n year = int(skey)\n rdict[year] = val\n year_param[pkey] = rdict\n # convert year_param dictionary to year_key_dict dictionary\n year_key_dict = dict()\n years = set()\n for param, sdict in year_param.items():\n for year, val in sdict.items():\n if year not in years:\n years.add(year)\n year_key_dict[year] = dict()\n year_key_dict[year][param] = val\n return year_key_dict\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.allclose", "numpy.array_equal" ], [ "pandas.concat", "numpy.allclose", "pandas.DataFrame", "numpy.column_stack", "numpy.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
shivgarg/alfred_transformers
[ "3eab07d3a218eb9b809dec8b7120b92ebd00c890" ]
[ "models/model/seq2seq_im_mask_cnn_finetune.py" ]
[ "import os\nimport torch\nimport numpy as np\nimport nn.vnn as vnn\nimport collections\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\nfrom model.seq2seq import Module as Base\nfrom models.utils.metric import compute_f1, compute_exact\nfrom gen.utils.image_util import decompress_mask\nfrom torchvision import transforms\nfrom PIL import Image\n\nclass Module(Base):\n\n def __init__(self, args, vocab):\n '''\n Seq2Seq agent\n '''\n super().__init__(args, vocab)\n\n # encoder and self-attention\n self.enc = nn.LSTM(args.demb, args.dhid, bidirectional=True, batch_first=True)\n self.enc_att = vnn.SelfAttn(args.dhid*2)\n\n # subgoal monitoring\n self.subgoal_monitoring = (self.args.pm_aux_loss_wt > 0 or self.args.subgoal_aux_loss_wt > 0)\n\n # frame mask decoder\n decoder = vnn.ConvFrameMaskDecoderProgressMonitorFinetune if self.subgoal_monitoring else vnn.ConvFrameMaskDecoder\n self.dec = decoder(self.emb_action_low, args.dframe, 2*args.dhid,\n pframe=args.pframe,\n attn_dropout=args.attn_dropout,\n hstate_dropout=args.hstate_dropout,\n actor_dropout=args.actor_dropout,\n input_dropout=args.input_dropout,\n teacher_forcing=args.dec_teacher_forcing)\n\n # dropouts\n self.vis_dropout = nn.Dropout(args.vis_dropout)\n self.lang_dropout = nn.Dropout(args.lang_dropout, inplace=True)\n self.input_dropout = nn.Dropout(args.input_dropout)\n\n # internal states\n self.state_t = None\n self.e_t = None\n self.test_mode = False\n\n # bce reconstruction loss\n self.bce_with_logits = torch.nn.BCEWithLogitsLoss(reduction='none')\n self.mse_loss = torch.nn.MSELoss(reduction='none')\n\n # paths\n self.root_path = os.getcwd()\n self.feat_pt = 'feat_conv.pt'\n\n # params\n self.max_subgoals = 25\n self.max_episode_len = args.max_episode_len\n # reset model\n self.reset()\n\n def featurize(self, batch, load_mask=True, load_frames=True):\n '''\n tensorize and pad batch input\n '''\n device = torch.device('cuda') if self.args.gpu else torch.device('cpu')\n feat = collections.defaultdict(list)\n\n for ex in batch:\n ###########\n # auxillary\n ###########\n\n if not self.test_mode:\n # subgoal completion supervision\n if self.args.subgoal_aux_loss_wt > 0:\n feat['subgoals_completed'].append(np.array(ex['num']['low_to_high_idx']) / self.max_subgoals)\n\n # progress monitor supervision\n if self.args.pm_aux_loss_wt > 0:\n num_actions = len([a for sg in ex['num']['action_low'] for a in sg])\n subgoal_progress = [(i+1)/float(num_actions) for i in range(num_actions)]\n feat['subgoal_progress'].append(subgoal_progress)\n\n #########\n # inputs\n #########\n\n # serialize segments\n self.serialize_lang_action(ex)\n\n # goal and instr language\n lang_goal, lang_instr = ex['num']['lang_goal'], ex['num']['lang_instr']\n\n # zero inputs if specified\n lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal\n lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr\n\n # append goal + instr\n lang_goal_instr = lang_goal + lang_instr\n feat['lang_goal_instr'].append(lang_goal_instr)\n episode_len = 0\n # load Resnet features from disk\n if load_frames and not self.test_mode:\n root = self.get_task_root(ex)\n #im = torch.load(os.path.join(root, self.feat_pt))\n im = []\n path = \"{}/{}\".format(root,'raw_images')\n imgs = sorted(os.listdir(path))\n tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 
0.225]),])\n for img in imgs:\n im.append(tfms(Image.open(\"{}/{}\".format(path,img))))\n im = torch.stack(im)\n num_low_actions = len(ex['plan']['low_actions'])\n num_feat_frames = im.shape[0]\n\n if num_low_actions != num_feat_frames:\n keep = [None] * len(ex['plan']['low_actions'])\n for i, d in enumerate(ex['images']):\n # only add frames linked with low-level actions (i.e. skip filler frames like smooth rotations and dish washing)\n if keep[d['low_idx']] is None:\n keep[d['low_idx']] = im[i]\n keep.append(keep[-1]) # stop frame\n episode_len = min(self.max_episode_len, len(keep))\n keep = keep[:episode_len]\n feat['frames'].append(torch.stack(keep, dim=0))\n else:\n episode_len = min(self.max_episode_len, len(im))\n im = im[:episode_len]\n feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0)) # add stop frame\n\n #########\n # outputs\n #########\n if self.args.subgoal_aux_loss_wt > 0:\n feat['subgoals_completed'][-1] = feat['subgoals_completed'][-1][:episode_len]\n \n if self.args.pm_aux_loss_wt > 0:\n feat['subgoal_progress'][-1] = feat['subgoal_progress'][-1][:episode_len]\n \n if not self.test_mode:\n # low-level action\n feat['action_low'].append([a['action'] for a in ex['num']['action_low']][:episode_len])\n\n # low-level action mask\n if load_mask:\n feat['action_low_mask'].append([self.decompress_mask(a['mask']) for i,a in enumerate(ex['num']['action_low']) if a['mask'] is not None and i<episode_len])\n\n # low-level valid interact\n feat['action_low_valid_interact'].append([a['valid_interact'] for a in ex['num']['action_low']][:episode_len])\n\n\n # tensorization and padding\n for k, v in feat.items():\n if k in {'lang_goal_instr'}:\n # language embedding and padding\n seqs = [torch.tensor(vv, device=device) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n seq_lengths = np.array(list(map(len, v)))\n embed_seq = self.emb_word(pad_seq)\n packed_input = pack_padded_sequence(embed_seq, seq_lengths, batch_first=True, enforce_sorted=False)\n feat[k] = packed_input\n elif k in {'action_low_mask'}:\n # mask padding\n seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]\n feat[k] = seqs\n elif k in {'subgoal_progress', 'subgoals_completed'}:\n # auxillary padding\n seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n feat[k] = pad_seq\n else:\n # default: tensorize and pad sequence\n seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v]\n pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)\n feat[k] = pad_seq\n\n return feat\n\n\n def serialize_lang_action(self, feat):\n '''\n append segmented instr language and low-level actions into single sequences\n '''\n is_serialized = not isinstance(feat['num']['lang_instr'][0], list)\n if not is_serialized:\n feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc]\n if not self.test_mode:\n feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group]\n\n\n def decompress_mask(self, compressed_mask):\n '''\n decompress mask from json files\n '''\n mask = np.array(decompress_mask(compressed_mask))\n mask = np.expand_dims(mask, axis=0)\n return mask\n\n\n def forward(self, feat, max_decode=300):\n cont_lang, enc_lang = self.encode_lang(feat)\n state_0 = cont_lang, torch.zeros_like(cont_lang)\n frames = self.vis_dropout(feat['frames'])\n res = 
self.dec(enc_lang, frames, max_decode=self.max_episode_len, gold=feat['action_low'], state_0=state_0)\n feat.update(res)\n return feat\n\n\n def encode_lang(self, feat):\n '''\n encode goal+instr language\n '''\n emb_lang_goal_instr = feat['lang_goal_instr']\n self.lang_dropout(emb_lang_goal_instr.data)\n enc_lang_goal_instr, _ = self.enc(emb_lang_goal_instr)\n enc_lang_goal_instr, _ = pad_packed_sequence(enc_lang_goal_instr, batch_first=True)\n self.lang_dropout(enc_lang_goal_instr)\n cont_lang_goal_instr = self.enc_att(enc_lang_goal_instr)\n\n return cont_lang_goal_instr, enc_lang_goal_instr\n\n\n def reset(self):\n '''\n reset internal states (used for real-time execution during eval)\n '''\n self.r_state = {\n 'state_t': None,\n 'e_t': None,\n 'cont_lang': None,\n 'enc_lang': None\n }\n\n def step(self, feat, prev_action=None):\n '''\n forward the model for a single time-step (used for real-time execution during eval)\n '''\n\n # encode language features\n if self.r_state['cont_lang'] is None and self.r_state['enc_lang'] is None:\n self.r_state['cont_lang'], self.r_state['enc_lang'] = self.encode_lang(feat)\n\n # initialize embedding and hidden states\n if self.r_state['e_t'] is None and self.r_state['state_t'] is None:\n self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_lang'].size(0), 1)\n self.r_state['state_t'] = self.r_state['cont_lang'], torch.zeros_like(self.r_state['cont_lang'])\n\n # previous action embedding\n e_t = self.embed_action(prev_action) if prev_action is not None else self.r_state['e_t']\n\n # decode and save embedding and hidden states\n out_action_low, out_action_low_mask, state_t, *_ = self.dec.step(self.r_state['enc_lang'], feat['frames'][:, 0], e_t=e_t, state_tm1=self.r_state['state_t'])\n\n # save states\n self.r_state['state_t'] = state_t\n self.r_state['e_t'] = self.dec.emb(out_action_low.max(1)[1])\n\n # output formatting\n feat['out_action_low'] = out_action_low.unsqueeze(0)\n feat['out_action_low_mask'] = out_action_low_mask.unsqueeze(0)\n return feat\n\n\n def extract_preds(self, out, batch, feat, clean_special_tokens=True):\n '''\n output processing\n '''\n pred = {}\n for ex, alow, alow_mask in zip(batch, feat['out_action_low'].max(2)[1].tolist(), feat['out_action_low_mask']):\n # remove padding tokens\n if self.pad in alow:\n pad_start_idx = alow.index(self.pad)\n alow = alow[:pad_start_idx]\n alow_mask = alow_mask[:pad_start_idx]\n\n if clean_special_tokens:\n # remove <<stop>> tokens\n if self.stop_token in alow:\n stop_start_idx = alow.index(self.stop_token)\n alow = alow[:stop_start_idx]\n alow_mask = alow_mask[:stop_start_idx]\n\n # index to API actions\n words = self.vocab['action_low'].index2word(alow)\n\n # sigmoid preds to binary mask\n alow_mask = F.sigmoid(alow_mask)\n p_mask = [(alow_mask[t] > 0.5).cpu().numpy() for t in range(alow_mask.shape[0])]\n\n task_id_ann = self.get_task_and_ann_id(ex)\n pred[task_id_ann] = {\n 'action_low': ' '.join(words),\n 'action_low_mask': p_mask,\n }\n\n return pred\n\n\n def embed_action(self, action):\n '''\n embed low-level action\n '''\n device = torch.device('cuda') if self.args.gpu else torch.device('cpu')\n action_num = torch.tensor(self.vocab['action_low'].word2index(action), device=device)\n action_emb = self.dec.emb(action_num).unsqueeze(0)\n return action_emb\n\n\n def compute_loss(self, out, batch, feat):\n '''\n loss function for Seq2Seq agent\n '''\n losses = dict()\n\n # GT and predictions\n p_alow = out['out_action_low'].view(-1, len(self.vocab['action_low']))\n l_alow = 
feat['action_low'].view(-1)\n p_alow_mask = out['out_action_low_mask']\n valid = feat['action_low_valid_interact']\n\n # action loss\n pad_valid = (l_alow != self.pad)\n alow_loss = F.cross_entropy(p_alow, l_alow, reduction='none')\n alow_loss *= pad_valid.float()\n alow_loss = alow_loss.mean()\n losses['action_low'] = alow_loss * self.args.action_loss_wt\n\n # mask loss\n \n valid_idxs = valid.view(-1).nonzero().view(-1)\n flat_p_alow_mask = p_alow_mask.view(p_alow_mask.shape[0]*p_alow_mask.shape[1], *p_alow_mask.shape[2:])[valid_idxs]\n if flat_p_alow_mask.shape[0]!=0:\n flat_alow_mask = torch.cat(feat['action_low_mask'], dim=0)\n alow_mask_loss = self.weighted_mask_loss(flat_p_alow_mask, flat_alow_mask)\n losses['action_low_mask'] = alow_mask_loss * self.args.mask_loss_wt\n \n # subgoal completion loss\n if self.args.subgoal_aux_loss_wt > 0:\n p_subgoal = feat['out_subgoal'].squeeze(2)\n l_subgoal = feat['subgoals_completed']\n sg_loss = self.mse_loss(p_subgoal, l_subgoal)\n sg_loss = sg_loss.view(-1) * pad_valid.float()\n subgoal_loss = sg_loss.mean()\n losses['subgoal_aux'] = self.args.subgoal_aux_loss_wt * subgoal_loss\n\n # progress monitoring loss\n if self.args.pm_aux_loss_wt > 0:\n p_progress = feat['out_progress'].squeeze(2)\n l_progress = feat['subgoal_progress']\n pg_loss = self.mse_loss(p_progress, l_progress)\n pg_loss = pg_loss.view(-1) * pad_valid.float()\n progress_loss = pg_loss.mean()\n losses['progress_aux'] = self.args.pm_aux_loss_wt * progress_loss\n\n return losses\n\n\n def weighted_mask_loss(self, pred_masks, gt_masks):\n '''\n mask loss that accounts for weight-imbalance between 0 and 1 pixels\n '''\n bce = self.bce_with_logits(pred_masks, gt_masks)\n flipped_mask = self.flip_tensor(gt_masks)\n inside = (bce * gt_masks).sum() / (gt_masks).sum()\n outside = (bce * flipped_mask).sum() / (flipped_mask).sum()\n return inside + outside\n\n\n def flip_tensor(self, tensor, on_zero=1, on_non_zero=0):\n '''\n flip 0 and 1 values in tensor\n '''\n res = tensor.clone()\n res[tensor == 0] = on_zero\n res[tensor != 0] = on_non_zero\n return res\n\n\n def compute_metric(self, preds, data):\n '''\n compute f1 and extract match scores for output\n '''\n m = collections.defaultdict(list)\n for task in data:\n ex = self.load_task_json(task)\n i = self.get_task_and_ann_id(ex)\n label = ' '.join([a['discrete_action']['action'] for a in ex['plan']['low_actions']])\n m['action_low_f1'].append(compute_f1(label.lower(), preds[i]['action_low'].lower()))\n m['action_low_em'].append(compute_exact(label.lower(), preds[i]['action_low'].lower()))\n return {k: sum(v)/len(v) for k, v in m.items()}\n" ]
[ [ "torch.nn.Dropout", "numpy.expand_dims", "torch.nn.LSTM", "torch.cat", "torch.nn.functional.cross_entropy", "torch.zeros_like", "torch.nn.utils.rnn.pad_sequence", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "torch.nn.functional.sigmoid", "torch.nn.BCEWithLogitsLoss", "torch.nn.utils.rnn.pad_packed_sequence", "torch.device", "numpy.array", "torch.nn.MSELoss", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GarnixJu2015/GamestonkTerminal
[ "ec400e46ddce4ac934af836b863528f14a13d865" ]
[ "tests/gamestonk_terminal/etf/discovery/test_disc_controller.py" ]
[ "# IMPORTATION STANDARD\nimport os\n\n# IMPORTATION THIRDPARTY\nimport pandas as pd\nimport pytest\n\n# IMPORTATION INTERNAL\nfrom gamestonk_terminal.etf.discovery import disc_controller\n\n# pylint: disable=E1101\n# pylint: disable=W0603\n# pylint: disable=E1111\n\nEMPTY_DF = pd.DataFrame()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"queue, expected\",\n [\n ([\"load\", \"help\"], []),\n ([\"quit\", \"help\"], [\"help\"]),\n ],\n)\ndef test_menu_with_queue(expected, mocker, queue):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # MOCK SWITCH\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n return_value=[\"quit\"],\n )\n result_menu = disc_controller.DiscoveryController(queue=queue).menu()\n\n assert result_menu == expected\n\n\[email protected](record_mode=\"none\")\ndef test_menu_without_queue_completion(mocker):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU\n mocker.patch(\n target=\"gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=\"gamestonk_terminal.parent_classes.session\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.parent_classes.session.prompt\",\n return_value=\"quit\",\n )\n mocker.patch(\n target=\"gamestonk_terminal.etf.financedatabase_model.get_etfs_categories\",\n return_value=[\"Bank Loan\"],\n )\n\n # DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER\n mocker.patch.object(\n target=disc_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=True,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n )\n mocker.patch(\n target=f\"{path_controller}.session.prompt\",\n return_value=\"quit\",\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"mock_input\",\n [\"help\", \"homee help\", \"home help\", \"mock\"],\n)\ndef test_menu_without_queue_sys_exit(mock_input, mocker):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=disc_controller.gtff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH\n class SystemExitSideEffect:\n def __init__(self):\n self.first_call = True\n\n def __call__(self, *args, **kwargs):\n if self.first_call:\n self.first_call = False\n raise SystemExit()\n return [\"quit\"]\n\n mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())\n mocker.patch(\n target=f\"{path_controller}.DiscoveryController.switch\",\n new=mock_switch,\n )\n\n result_menu = disc_controller.DiscoveryController(queue=None).menu()\n\n assert result_menu == []\n\n\[email protected](record_mode=\"none\")\[email protected]_stdout\ndef test_print_help():\n\n controller = disc_controller.DiscoveryController(queue=None)\n controller.print_help()\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n ],\n)\ndef test_switch(an_input, expected_queue):\n\n controller = disc_controller.DiscoveryController(queue=None)\n queue = 
controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\[email protected](record_mode=\"none\")\ndef test_call_cls(mocker):\n mocker.patch(\"os.system\")\n\n controller = disc_controller.DiscoveryController(queue=None)\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\", \"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"disc\",\n \"help\",\n ],\n ),\n ],\n)\ndef test_call_func_expect_queue(expected_queue, func, queue):\n controller = disc_controller.DiscoveryController(queue=queue)\n result = getattr(controller, func)([])\n\n assert result is None\n assert controller.queue == expected_queue\n\n\[email protected](record_mode=\"none\")\[email protected](\n \"tested_func, other_args, mocked_func, called_args, called_kwargs\",\n [\n (\n \"call_gainers\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"gainers\", 10, \"\"],\n dict(),\n ),\n (\n \"call_decliners\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"decliners\", 10, \"\"],\n dict(),\n ),\n (\n \"call_active\",\n [\"-l=10\"],\n \"wsj_view.show_top_mover\",\n [\"active\", 10, \"\"],\n dict(),\n ),\n ],\n)\ndef test_call_func_test(\n tested_func, mocked_func, other_args, called_args, called_kwargs, mocker\n):\n path_controller = \"gamestonk_terminal.etf.discovery.disc_controller\"\n\n if mocked_func:\n mock = mocker.Mock()\n mocker.patch(\n target=f\"{path_controller}.{mocked_func}\",\n new=mock,\n )\n\n controller = disc_controller.DiscoveryController(queue=None)\n\n getattr(controller, tested_func)(other_args)\n\n if called_args or called_kwargs:\n mock.assert_called_once_with(*called_args, **called_kwargs)\n else:\n mock.assert_called_once()\n else:\n controller = disc_controller.DiscoveryController(queue=None)\n getattr(controller, tested_func)(other_args)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
maij/pyGSTi
[ "4f8bf5337b01b7afcb7b0580b717b5d1fe281be4", "4f8bf5337b01b7afcb7b0580b717b5d1fe281be4" ]
[ "pygsti/models/modelconstruction.py", "pygsti/report/workspacetables.py" ]
[ "\"\"\"\nFunctions for the construction of new models.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport collections as _collections\nimport itertools as _itertools\nfrom os import stat\nfrom pygsti.modelmembers.instruments.instrument import Instrument\n\nimport numpy as _np\nimport scipy as _scipy\nimport scipy.linalg as _spl\n\nfrom pygsti.evotypes import Evotype as _Evotype\nfrom pygsti.modelmembers import operations as _op\nfrom pygsti.modelmembers import povms as _povm\nfrom pygsti.modelmembers import states as _state\nfrom pygsti.modelmembers import instruments as _instrument\nfrom pygsti.modelmembers.operations import opfactory as _opfactory\nfrom pygsti.models import stencillabel as _stencil\nfrom pygsti.models.modelnoise import OpModelNoise as _OpModelNoise\nfrom pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise\nfrom pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise\nfrom pygsti.models.modelnoise import LindbladNoise as _LindbladNoise\nfrom pygsti.models.modelnoise import StochasticNoise as _StochasticNoise\nfrom pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise\nfrom pygsti.models import explicitmodel as _emdl\nfrom pygsti.models import gaugegroup as _gg\nfrom pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel\nfrom pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel\nfrom pygsti.baseobjs import label as _label\nfrom pygsti.baseobjs import statespace as _statespace\nfrom pygsti.baseobjs.basis import Basis as _Basis\nfrom pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis\nfrom pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis\nfrom pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph\nfrom pygsti.tools import basistools as _bt\nfrom pygsti.tools import internalgates as _itgs\nfrom pygsti.tools import optools as _ot\nfrom pygsti.tools import listtools as _lt\nfrom pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz\nfrom pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter\nfrom pygsti.tools.legacytools import deprecate as _deprecated_fn\n\n\n#############################################\n# Build gates based on \"standard\" gate names\n############################################\ndef create_spam_vector(vec_expr, state_space, basis):\n \"\"\"\n Build a rho or E vector from an expression.\n\n Parameters\n ----------\n vec_expr : string\n the expression which determines which vector to build. Currenlty, only\n integers are allowed, which specify a the vector for the pure state of\n that index. For example, \"1\" means return vectorize(``|1><1|``). 
The\n index labels the absolute index of the state within the entire state\n space, and is independent of the direct-sum decomposition of density\n matrix space.\n\n state_space : StateSpace\n The state space that the created operation should act upon.\n\n basis : str or Basis\n The basis of the returned vector. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n Returns\n -------\n numpy array\n The vector specified by vec_expr in the desired basis.\n \"\"\"\n #So far just allow integer prep_expressions that give the index of state (within the state space) that we\n #prep/measure\n try:\n index = int(vec_expr)\n except:\n raise ValueError(\"Expression must be the index of a state (as a string)\")\n\n state_space = _statespace.StateSpace.cast(state_space)\n if isinstance(basis, str):\n basis = _Basis.cast(basis, state_space)\n assert (state_space.dim == basis.dim), \\\n \"State space labels dim (%s) != basis dim (%s)\" % (state_space.dim, basis.dim)\n\n #standard basis that has the same direct-sum structure as `basis`:\n std_basis = basis.create_equivalent('std')\n vecInSimpleStdBasis = _np.zeros(std_basis.elshape, 'd') # a matrix, but flattened it is our spamvec\n vecInSimpleStdBasis[index, index] = 1.0 # now a matrix with just a single 1 on the diag\n vecInReducedStdBasis = _np.dot(std_basis.from_elementstd_transform_matrix, vecInSimpleStdBasis.flatten())\n # translates the density matrix / state vector to the std basis with our desired block structure\n\n vec = _bt.change_basis(vecInReducedStdBasis, std_basis, basis)\n return vec.reshape(-1, 1)\n\n\ndef create_identity_vec(basis):\n \"\"\"\n Build a the identity vector for a given space and basis.\n\n Parameters\n ----------\n basis : Basis object\n The basis of the returned vector. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n Returns\n -------\n numpy array\n The identity vector in the desired basis.\n \"\"\"\n opDim = basis.dim\n if isinstance(basis, _DirectSumBasis):\n blockDims = [c.dim for c in basis.component_bases]\n else: blockDims = [opDim]\n\n # assume index given as vec_expr refers to a Hilbert-space state index, so \"reduced-std\" basis\n vecInReducedStdBasis = _np.zeros((opDim, 1), 'd')\n\n #set all diagonal elements of density matrix to 1.0 (end result = identity density mx)\n start = 0; vecIndex = 0\n for blockVecDim in blockDims:\n blockDim = int(_np.sqrt(blockVecDim)) # vec -> matrix dim\n for i in range(start, start + blockDim):\n for j in range(start, start + blockDim):\n if i == j: vecInReducedStdBasis[vecIndex, 0] = 1.0 # set diagonal element of density matrix\n vecIndex += 1\n start += blockDim\n return _bt.change_basis(vecInReducedStdBasis, \"std\", basis)\n\n\ndef create_operation(op_expr, state_space, basis=\"pp\", parameterization=\"full\", evotype='default'):\n \"\"\"\n Build an operation object from an expression.\n\n Parameters\n ----------\n op_expr : string\n expression for the gate to build. String is first split into parts\n delimited by the colon (:) character, which are composed together to\n create the final gate. Each part takes on of the allowed forms:\n\n - I(ssl_0, ...) 
= identity operation on one or more state space labels\n (ssl_i)\n - X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl\n - Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl\n - Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl\n - CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts\n on qubit labeled by ssl1 with ssl0 being the control.\n - CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit\n labeled by ssl1 with ssl0 being the control.\n - CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit\n labeled by ssl1 with ssl0 being the control.\n - LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as\n an x-rotation between states with integer indices i0 and i1 followed\n by complete decoherence between the states.\n\n state_space : StateSpace\n The state space that the created operation should act upon.\n\n basis : str or Basis\n The basis the returned operation should be represented in.\n\n parameterization : {\"full\",\"TP\",\"static\"}, optional\n How to parameterize the resulting gate.\n\n - \"full\" = return a FullArbitraryOp.\n - \"TP\" = return a FullTPOp.\n - \"static\" = return a StaticArbitraryOp.\n\n evotype : Evotype or str, optional\n The evolution type of this operation, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n LinearOperator\n A gate object representing the gate given by op_expr in the desired\n basis.\n \"\"\"\n # op_expr can contain single qubit ops: X(theta) ,Y(theta) ,Z(theta)\n # two qubit ops: CNOT\n # clevel qubit ops: Leak\n # two clevel opts: Flip\n # each of which is given additional parameters specifying which indices it acts upon\n\n #Working with a StateSpaceLabels object gives us access to all the info we'll need later\n state_space = _statespace.StateSpace.cast(state_space)\n if isinstance(basis, str):\n basis = _Basis.cast(basis, state_space)\n assert(state_space.dim == basis.dim), \\\n \"State space labels dim (%s) != basis dim (%s)\" % (state_space.dim, basis.dim)\n\n # ------------------------------------------------------------------------------------------------------------------\n # -- Helper Functions ----------------------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------------------------------------------\n\n def to_label(lbl):\n \"\"\" Convert integer-strings to integers in state space label \"\"\"\n try: return int(lbl)\n except: return lbl.strip()\n\n def to_labels(lbls):\n \"\"\" Convert integer-strings to integers in state space labels \"\"\"\n return [to_label(lbl) for lbl in lbls]\n\n # ------------------------------------------------------------------------------------------------------------------\n # -- End Helper Functions ------------------------------------------------------------------------------------------\n # ------------------------------------------------------------------------------------------------------------------\n\n #FUTURE?: type_preferences = ('static standard', 'static clifford', 'static unitary')\n build_evotype = 
'default'\n superop_mxs_in_basis = []\n exprTerms = op_expr.split(':')\n for exprTerm in exprTerms:\n\n l = exprTerm.index('('); r = exprTerm.rindex(')')\n opName = exprTerm[0:l]\n argsStr = exprTerm[l + 1:r]\n args = argsStr.split(',')\n\n if opName == \"I\":\n # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)\n labels = to_labels(args)\n stateSpaceUDim = int(_np.product([state_space.label_udimension(l) for l in labels]))\n # a complex 2x2 mx unitary for the identity in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)\n\n #FUTURE?:\n # stdname = 'Gi' if (stateSpaceUDim == 2) else None\n # Uop = _op.create_from_unitary_mx(_np.identity(stateSpaceUDim, complex), type_preferences, 'pp',\n # stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == \"D\":\n # like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map\n raise NotImplementedError(\"Removed temporarily - need to update using embedded gates\")\n # # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)\n # labels = to_labels(args)\n # stateSpaceDim = sslbls.product_dim(labels)\n\n # if parameterization not in (\"linear\",\"linearTP\"):\n # raise ValueError(\"'D' gate only makes sense to use when and parameterization == 'linear'\")\n\n # if defaultI2P == \"TP\":\n # # parameterize only the diagonals els after the first\n # indicesToParameterize = [ (i,i) for i in range(1,stateSpaceDim**2) ]\n # else:\n # # parameterize only the diagonals els\n # indicesToParameterize = [ (i,i) for i in range(0,stateSpaceDim**2) ]\n # # *real* 4x4 mx in Pauli-product basis -- still just the identity!\n # pp_opMx = _np.identity(stateSpaceDim**2, 'd')\n # # pp_opMx assumed to be in the Pauli-product basis\n # opTermInFinalBasis = embed_operation(pp_opMx, tuple(labels), indicesToParameterize)\n\n elif opName in ('X', 'Y', 'Z'): # single-qubit gate names\n assert(len(args) == 2) # theta, qubit-index\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n label = to_label(args[1])\n assert(state_space.label_dimension(label) == 4), \"%s gate must act on qubits!\" % opName\n\n if opName == 'X': ex = -1j * theta * sigmax / 2\n elif opName == 'Y': ex = -1j * theta * sigmay / 2\n elif opName == 'Z': ex = -1j * theta * sigmaz / 2\n\n # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)\n\n #FUTURE?:\n #stdname = None\n #if _np.isclose(theta, _np.pi): stdname = 'G%spi' % opName.lower()\n #elif _np.isclose(theta, _np.pi/2): stdname = 'G%spi2' % opName.lower()\n # Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx 
superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == 'N': # more general single-qubit gate\n assert(len(args) == 5) # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n sxCoeff = eval(args[1], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n syCoeff = eval(args[2], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n szCoeff = eval(args[3], {\"__builtins__\": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})\n label = to_label(args[4])\n assert(state_space.label_dimension(label) == 4), \"%s gate must act on qubits!\" % opName\n\n ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)\n # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)\n #FUTURE?: Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', evotype=evotype)\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis\n Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'): # two-qubit gate names\n\n if opName in ('CX', 'CY', 'CZ'):\n assert(len(args) == 3) # theta, qubit-label1, qubit-label2\n theta = eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n label1 = to_label(args[1]); label2 = to_label(args[2])\n\n if opName == 'CX': ex = -1j * theta * sigmax / 2\n elif opName == 'CY': ex = -1j * theta * sigmay / 2\n elif opName == 'CZ': ex = -1j * theta * sigmaz / 2\n Utarget = _spl.expm(ex) # 2x2 unitary matrix operating on target qubit\n\n else: # opName in ('CNOT','CPHASE')\n assert(len(args) == 2) # qubit-label1, qubit-label2\n label1 = to_label(args[0]); label2 = to_label(args[1])\n\n if opName == 'CNOT':\n Utarget = _np.array([[0, 1],\n [1, 0]], 'd')\n elif opName == 'CPHASE':\n Utarget = _np.array([[1, 0],\n [0, -1]], 'd')\n\n # 4x4 unitary matrix operating on isolated two-qubit space\n U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget\n assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \\\n \"%s gate must act on qubits!\" % opName\n # complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis\n Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)\n\n #FUTURE?:\n # if opName == \"CNOT\": stdname = \"Gcnot\"\n # elif opName == \"CPHASE\": stdname = \"Gcphase\"\n # else: stdname = None\n # Uop = _op.create_from_unitary_mx(U, type_preferences, 'pp', stdname=stdname, evotype=evotype)\n\n # a complex 2*num_qubits x 2*num_qubits mx unitary on full space\n Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)\n # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis\n superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')\n # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis\n superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)\n\n elif opName == \"LX\": # TODO - better way to describe leakage?\n assert(len(args) == 3) # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states\n theta = 
eval(args[0], {\"__builtins__\": None}, {'pi': _np.pi})\n i1 = int(args[1]) # row/column index of a single *state* within the density matrix\n i2 = int(args[2]) # row/column index of a single *state* within the density matrix\n ex = -1j * theta * sigmax / 2\n Uop = _spl.expm(ex) # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis\n\n opDim = basis.dim\n dmDim = int(_np.sqrt(basis.elsize)) # matrix dim of the \"embedding space\"\n if isinstance(basis, _DirectSumBasis):\n blockDims = [c.dim for c in basis.component_bases]\n else: blockDims = [opDim]\n\n Utot = _np.identity(dmDim, 'complex')\n Utot[i1, i1] = Uop[0, 0]\n Utot[i1, i2] = Uop[0, 1]\n Utot[i2, i1] = Uop[1, 0]\n Utot[i2, i2] = Uop[1, 1]\n\n # dmDim^2 x dmDim^2 mx operating on vectorized total densty matrix\n opTermInStdBasis = _ot.unitary_to_process_mx(Utot)\n\n # contract [3] to [2, 1]\n embedded_std_basis = _Basis.cast('std', 9) # [2]\n std_basis = _Basis.cast('std', blockDims) # std basis w/blockdim structure, i.e. [4,1]\n opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',\n embedded_std_basis, std_basis)\n\n superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)\n\n else: raise ValueError(\"Invalid gate name: %s\" % opName)\n\n superop_mxs_in_basis.append(superop_mx_in_basis)\n\n #Note: expressions are listed in \"matrix composition order\"\n final_superop_mx = superop_mxs_in_basis[0]\n for mx in superop_mxs_in_basis[1:]:\n final_superop_mx = _np.dot(final_superop_mx, mx)\n\n if basis.real:\n assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), \"Operation matrix should be real but isn't!\"\n final_superop_mx = _np.real(final_superop_mx)\n\n return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,\n evotype=evotype, state_space=state_space)\n\n\ndef _create_explicit_model_from_expressions(state_space, basis,\n op_labels, op_expressions,\n prep_labels=('rho0',), prep_expressions=('0',),\n effect_labels='standard', effect_expressions='standard',\n povm_labels='Mdefault', gate_type=\"full\", prep_type=\"auto\",\n povm_type=\"auto\", instrument_type=\"auto\", evotype='default'):\n \"\"\"\n Build a new Model given lists of operation labels and expressions.\n\n Parameters\n ----------\n state_space : StateSpace\n The state space for this model.\n\n basis : Basis object\n The source and destination basis, respectively. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),\n and Qutrit (qt) (or a custom basis object).\n\n op_labels : list of strings\n A list of labels for each created gate in the final model. To\n conform with text file parsing conventions these names should begin\n with a capital G and can be followed by any number of lowercase\n characters, numbers, or the underscore character.\n\n op_expressions : list of strings\n A list of gate expressions, each corresponding to a operation label in\n op_labels, which determine what operation each gate performs (see\n documentation for :meth:`create_operation`).\n\n prep_labels : list of string, optional\n A list of labels for each created state preparation in the final\n model. 
To conform with conventions these labels should begin with\n \"rho\".\n\n prep_expressions : list of strings, optional\n A list of vector expressions for each state preparation vector (see\n documentation for :meth:`_create_spam_vector`).\n\n effect_labels : list, optional\n If `povm_labels` is a string, then this is just a list of the effect\n (outcome) labels for the single POVM. If `povm_labels` is a tuple,\n then `effect_labels` must be a list of lists of effect labels, each\n list corresponding to a POVM. If set to the special string `\"standard\"`\n then the length-n binary strings are used when the state space consists\n of n qubits (e.g. `\"000\"`, `\"001\"`, ... `\"111\"` for 3 qubits) and\n the labels `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used, where `<dim>`\n is the dimension of the state space, in all non-qubit cases.\n\n effect_expressions : list, optional\n A list or list-of-lists of (string) vector expressions for each POVM\n effect vector (see documentation for :meth:`_create_spam_vector`). Expressions\n correspond to labels in `effect_labels`. If set to the special string\n `\"standard\"`, then the expressions `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used,\n where `<dim>` is the dimension of the state space.\n\n povm_labels : list or string, optional\n A list of POVM labels, or a single (string) label. In the latter case,\n only a single POVM is created and the format of `effect_labels` and\n `effect_expressions` is simplified (see above).\n\n parameterization : {\"full\",\"TP\",\"static\"}, optional\n How to parameterize the gates of the resulting Model (see\n documentation for :meth:`create_operation`).\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n Model\n The created model.\n \"\"\"\n #defP = \"TP\" if (parameterization in (\"TP\",\"linearTP\")) else \"full\"\n state_space = _statespace.StateSpace.cast(state_space)\n\n ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,\n default_prep_type=prep_type, default_povm_type=povm_type,\n default_instrument_type=instrument_type, evotype=evotype)\n #prep_prefix=\"rho\", effect_prefix=\"E\", gate_prefix=\"G\")\n\n if prep_type == \"auto\":\n prep_type = _state.state_type_from_op_type(gate_type)\n if povm_type == \"auto\":\n povm_type = _povm.povm_type_from_op_type(gate_type)\n if instrument_type == \"auto\":\n instrument_type = _instrument.instrument_type_from_op_type(gate_type)\n\n for label, rhoExpr in zip(prep_labels, prep_expressions):\n vec = create_spam_vector(rhoExpr, state_space, basis)\n ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)\n\n if isinstance(povm_labels, str):\n povm_labels = [povm_labels]\n effect_labels = [effect_labels]\n effect_expressions = [effect_expressions]\n\n dmDim = int(_np.sqrt(basis.dim)) # \"densitymx\" evotype assumed... 
FIX?\n for povmLbl, ELbls, EExprs in zip(povm_labels,\n effect_labels, effect_expressions):\n effect_vecs = {}\n\n if ELbls == \"standard\":\n qubit_dim = 4\n if state_space.num_tensor_product_blocks == 1 and \\\n all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):\n # a single tensor product block comprised of qubits: '000', '001', etc.\n nQubits = len(state_space.tensor_product_block_dimensions(0))\n ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]\n else:\n ELbls = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim\n if EExprs == \"standard\":\n EExprs = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim\n\n effect_vecs = {label: create_spam_vector(expr, state_space, basis)\n for label, expr in zip(ELbls, EExprs)}\n\n if len(effect_vecs) > 0: # don't add POVMs with 0 effects\n ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)\n\n for (opLabel, opExpr) in zip(op_labels, op_expressions):\n ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)\n\n if gate_type == \"full\":\n ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)\n elif gate_type == \"full TP\":\n ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)\n elif gate_type == 'CPTP':\n ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)\n else:\n ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)\n\n ret._clean_paramvec()\n return ret\n\n\ndef create_explicit_model_from_expressions(state_space,\n op_labels, op_expressions,\n prep_labels=('rho0',), prep_expressions=('0',),\n effect_labels='standard', effect_expressions='standard',\n povm_labels='Mdefault', basis=\"auto\", gate_type=\"full\",\n prep_type=\"auto\", povm_type=\"auto\", instrument_type=\"auto\",\n evotype='default'):\n \"\"\"\n Build a new :class:`ExplicitOpModel` given lists of labels and expressions.\n\n Parameters\n ----------\n state_space : StateSpace\n the state space for the model.\n\n op_labels : list of strings\n A list of labels for each created gate in the final model. To\n conform with text file parsing conventions these names should begin\n with a capital G and can be followed by any number of lowercase\n characters, numbers, or the underscore character.\n\n op_expressions : list of strings\n A list of gate expressions, each corresponding to a operation label in\n op_labels, which determine what operation each gate performs (see\n documentation for :meth:`create_operation`).\n\n prep_labels : list of string\n A list of labels for each created state preparation in the final\n model. To conform with conventions these labels should begin with\n \"rho\".\n\n prep_expressions : list of strings\n A list of vector expressions for each state preparation vector (see\n documentation for :meth:`_create_spam_vector`).\n\n effect_labels : list, optional\n If `povm_labels` is a string, then this is just a list of the effect\n (outcome) labels for the single POVM. If `povm_labels` is a tuple,\n then `effect_labels` must be a list of lists of effect labels, each\n list corresponding to a POVM. If set to the special string `\"standard\"`\n then the length-n binary strings are used when the state space consists\n of n qubits (e.g. `\"000\"`, `\"001\"`, ... `\"111\"` for 3 qubits) and\n the labels `\"0\"`, `\"1\"`, ... 
`\"<dim>\"` are used, where `<dim>`\n is the dimension of the state space, in all non-qubit cases.\n\n effect_expressions : list, optional\n A list or list-of-lists of (string) vector expressions for each POVM\n effect vector (see documentation for :meth:`_create_spam_vector`). Expressions\n correspond to labels in `effect_labels`. If set to the special string\n `\"standard\"`, then the expressions `\"0\"`, `\"1\"`, ... `\"<dim>\"` are used,\n where `<dim>` is the dimension of the state space.\n\n povm_labels : list or string, optional\n A list of POVM labels, or a single (string) label. In the latter case,\n only a single POVM is created and the format of `effect_labels` and\n `effect_expressions` is simplified (see above).\n\n basis : {'gm','pp','std','qt','auto'}, optional\n the basis of the matrices in the returned Model\n\n - \"std\" = operation matrix operates on density mx expressed as sum of matrix\n units\n - \"gm\" = operation matrix operates on dentity mx expressed as sum of\n normalized Gell-Mann matrices\n - \"pp\" = operation matrix operates on density mx expresses as sum of\n tensor-product of Pauli matrices\n - \"qt\" = operation matrix operates on density mx expressed as sum of\n Qutrit basis matrices\n - \"auto\" = \"pp\" if possible (integer num of qubits), \"qt\" if density\n matrix dim == 3, and \"gm\" otherwise.\n\n parameterization : {\"full\",\"TP\"}, optional\n How to parameterize the gates of the resulting Model (see\n documentation for :meth:`create_operation`).\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n Returns\n -------\n ExplicitOpModel\n The created model.\n \"\"\"\n\n #Note: so far, all allowed `parameterization` values => densitymx evotype\n state_space = _statespace.StateSpace.cast(state_space)\n stateSpaceDim = state_space.dim\n # Note: what about state_space_labels.tpb_dims?\n\n if basis == \"auto\":\n if _np.isclose(_np.log2(stateSpaceDim) / 2,\n round(_np.log2(stateSpaceDim) / 2)):\n basis = \"pp\"\n elif stateSpaceDim == 9:\n basis = \"qt\"\n else: basis = \"gm\"\n\n return _create_explicit_model_from_expressions(state_space,\n _Basis.cast(basis, state_space),\n op_labels, op_expressions,\n prep_labels, prep_expressions,\n effect_labels, effect_expressions,\n povm_labels, gate_type=gate_type,\n prep_type=prep_type, povm_type=povm_type,\n instrument_type=instrument_type, evotype=evotype)\n\n\ndef create_explicit_alias_model(mdl_primitives, alias_dict):\n \"\"\"\n Creates a model by applying aliases to an existing model.\n\n The new model is created by composing the gates of an existing `Model`,\n `mdl_primitives`, according to a dictionary of `Circuit`s, `alias_dict`.\n The keys of `alias_dict` are the operation labels of the returned `Model`.\n state preparations and POVMs are unaltered, and simply copied from `mdl_primitives`.\n\n Parameters\n ----------\n mdl_primitives : Model\n A Model containing the \"primitive\" gates (those used to compose\n the gates of the returned model).\n\n alias_dict : dictionary\n A dictionary whose keys are strings and values are Circuit objects\n specifying sequences of primitive gates. 
Each key,value pair specifies\n the composition rule for a creating a gate in the returned model.\n\n Returns\n -------\n Model\n A model whose gates are compositions of primitive gates and whose\n spam operations are the same as those of `mdl_primitives`.\n \"\"\"\n mdl_new = mdl_primitives.copy()\n for gl in mdl_primitives.operations.keys():\n del mdl_new.operations[gl] # remove all gates from mdl_new\n\n for gl, opstr in alias_dict.items():\n mdl_new.operations[gl] = mdl_primitives.sim.product(opstr)\n #Creates fully parameterized gates by default...\n\n mdl_new._clean_paramvec()\n return mdl_new\n\n\ndef create_explicit_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto',\n evotype=\"default\", simulator=\"auto\",\n ideal_gate_type='auto', ideal_spam_type='computational',\n embed_gates=False, basis='pp'):\n\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=True)\n\n return _create_explicit_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, ideal_gate_type, ideal_spam_type, ideal_spam_type, embed_gates, basis)\n\n\ndef _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype=\"default\", simulator=\"auto\",\n ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',\n embed_gates=False, basis='pp'):\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels)\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n\n if custom_gates is None:\n custom_gates = {}\n\n if ideal_gate_type == \"auto\":\n ideal_gate_type = ('static standard', 'static clifford', 'static unitary')\n if ideal_prep_type == \"auto\":\n ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)\n if ideal_povm_type == \"auto\":\n ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)\n\n def _embed_unitary(statespace, target_labels, unitary):\n dummyop = _op.EmbeddedOp(statespace, target_labels,\n _op.StaticUnitaryOp(unitary, basis='pp', evotype=\"statevec_slow\")) # basis hardcode?\n return dummyop.to_dense(\"Hilbert\")\n\n local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type) # no custom *local* gates\n ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,\n simulator=simulator)\n\n # Special rule: when initializng an explicit model, if the processor spec has an implied global idle\n # gate (e.g. 
\"(idle)\", then the created model instead has a empty-tuple Label as the key for this op.\n global_idle_name = processor_spec.global_idle_gate_name\n if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):\n gn_to_make_emptytup = global_idle_name\n else:\n gn_to_make_emptytup = None\n\n for gn, gate_unitary in processor_spec.gate_unitaries.items():\n\n gate_is_factory = callable(gate_unitary)\n resolved_avail = processor_spec.resolved_availability(gn)\n\n if callable(resolved_avail) or resolved_avail == '*':\n assert (embed_gates), \"Cannot create factories with `embed_gates=False` yet!\"\n key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())\n allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None\n gate_nQubits = processor_spec.gate_num_qubits(gn)\n ideal_factory = _opfactory.EmbeddingOpFactory(\n state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)\n noiseop = modelnoise.create_errormap(key, evotype, state_space) # No target indices... just local errs?\n factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])\n ret.factories[key] = factory\n\n else: # resolved_avail is a list/tuple of available sslbls for the current gate/factory\n for inds in resolved_avail: # inds are target qubit labels\n key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)\n\n if key in custom_gates: # allow custom_gates to specify gate elements directly\n if isinstance(custom_gates[key], _opfactory.OpFactory):\n ret.factories[key] = custom_gates[key]\n elif isinstance(custom_gates[key], _op.LinearOperator):\n ret.operations[key] = custom_gates[key]\n else: # presumably a numpy array or something like it.\n ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,\n state_space) # static gates by default\n continue\n\n if gate_is_factory:\n assert(embed_gates), \"Cannot create factories with `embed_gates=False` yet!\"\n # TODO: check for modelnoise on *local* factory, i.e. 
create_errormap(gn, ...)??\n if inds is None or inds == tuple(qubit_labels): # then no need to embed\n ideal_factory = local_gates[gn]\n else:\n ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])\n noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)\n factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])\n ret.factories[key] = factory\n else:\n if inds is None or inds == tuple(qubit_labels): # then no need to embed\n if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity\n assert(gate_unitary == len(qubit_labels)), \\\n \"Idle unitary as int should be on all qubits for %s\" % (str(gn))\n ideal_gate = _op.ComposedOp([], evotype, state_space) # (identity gate on *all* qubits)\n else:\n ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',\n None, evotype, state_space)\n else:\n if embed_gates:\n ideal_gate = local_gates[gn]\n ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)\n else:\n if isinstance(gate_unitary, (int, _np.int64)): # interpret gate_unitary as identity\n gate_unitary = _np.identity(2**gate_unitary, 'd') # turn into explicit identity op\n if gate_unitary.shape[0] == state_space.udim: # no need to embed!\n embedded_unitary = gate_unitary\n else:\n embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)\n ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',\n None, evotype, state_space)\n\n #TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??\n noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)\n layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate\n ret.operations[key] = layer\n\n # SPAM:\n local_noise = False; independent_gates = True; independent_spam = True\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype,\n state_space, independent_gates, independent_spam)\n for k, v in prep_layers.items():\n ret.preps[k] = v\n for k, v in povm_layers.items():\n ret.povms[k] = v\n\n modelnoise.warn_about_zero_counters()\n ret._clean_paramvec()\n return ret\n\n\ndef _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):\n \"\"\" local_noise=True creates lindblad ops that are embedded & composed 1Q ops, and assumes\n that modelnoise specifies 1Q noise. 
local_noise=False assumes modelnoise specifies n-qubit noise\"\"\"\n qubit_labels = processor_spec.qubit_labels\n num_qubits = processor_spec.num_qubits\n singleQ_state_space = _statespace.default_space_for_udim(2) # single qubit state space\n\n # Step 1 -- get the ideal prep and POVM, created as the types we want\n # Step 2 -- add noise, by composing ideal with a noise operation (if desired)\n prep_layers = {}\n povm_layers = {}\n\n def _add_prep_noise(prep_ops):\n \"\"\" Adds one or more noise ops to prep_ops lists (to compose later) \"\"\"\n if local_noise: # then assume modelnoise specifies 1Q errors\n prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)\n if prep_noiseop1Q is not None:\n err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \\\n if independent_gates else [prep_noiseop1Q] * num_qubits\n prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])\n for i in range(num_qubits)])\n else: # use modelnoise to construct n-qubit noise\n prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,\n qubit_graph=processor_spec.qubit_graph)\n if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)\n\n def _add_povm_noise(povm_ops):\n \"\"\" Adds one or more noise ops to prep_ops lists (to compose later) \"\"\"\n if local_noise: # then assume modelnoise specifies 1Q errors\n povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)\n if povm_noiseop1Q is not None:\n err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \\\n if independent_gates else [povm_noiseop1Q] * num_qubits\n povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])\n for i in range(num_qubits)])\n else: # use modelnoise to construct n-qubit noise\n povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,\n qubit_graph=processor_spec.qubit_graph)\n if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)\n\n def _add_to_prep_layers(ideal_prep, prep_ops):\n \"\"\" Adds noise elements to prep_layers \"\"\"\n if len(prep_ops_to_compose) == 0:\n prep_layers['rho0'] = ideal_prep\n elif len(prep_ops_to_compose) == 1:\n prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])\n else:\n prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))\n\n def _add_to_povm_layers(ideal_povm, povm_ops):\n \"\"\" Adds noise elements to povm_layers \"\"\"\n if len(povm_ops_to_compose) == 0:\n povm_layers['Mdefault'] = ideal_povm\n elif len(povm_ops_to_compose) == 1:\n povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')\n else:\n povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')\n\n def _create_nq_noise(lndtype):\n if local_noise:\n # create a 1-qubit exp(errorgen) that is applied to each qubit independently\n errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',\n truncate=True, evotype=evotype, state_space=None)\n err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],\n _op.ExpErrorgenOp(errgen_1Q.copy()))\n for i in range(num_qubits)], evotype, state_space)\n else:\n # create an n-qubit exp(errorgen)\n errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',\n truncate=True, evotype=evotype,\n state_space=state_space)\n err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)\n return err_gateNQ\n\n # Here's where the actual logic 
starts. The above functions avoid repeated blocks within the different\n # cases below.\n\n # Prep logic\n if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0] # HACK to support multiple vals\n if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):\n ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)\n\n prep_ops_to_compose = []\n if ideal_prep_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM\n lndtype = ideal_prep_type[len('lindblad '):]\n\n err_gateNQ = _create_nq_noise(lndtype)\n\n prep_ops_to_compose.append(err_gateNQ)\n\n # Add noise\n _add_prep_noise(prep_ops_to_compose)\n\n #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)\n _add_to_prep_layers(ideal_prep, prep_ops_to_compose)\n\n elif ideal_prep_type.startswith('tensor product '):\n #Note: with \"tensor product <X>\" types, e.g. \"tensor product static\", we assume modelnoise specifies just\n # a 1Q noise operation, even when `local_noise=False`\n vectype = ideal_prep_type[len('tensor product '):]\n\n v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')\n ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)\n prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]\n\n # Add noise\n prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)\n if prep_noiseop1Q is not None:\n prep_factors = [_state.ComposedState(\n factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]\n\n prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)\n\n else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs\n\n vectype = ideal_prep_type\n vecs = [] # all the basis vectors for num_qubits\n for i in range(2**num_qubits):\n v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0\n vecs.append(v)\n\n ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)\n\n # Add noise\n prep_ops_to_compose = []\n _add_prep_noise(prep_ops_to_compose)\n\n # Add final ops to returned dictionaries\n _add_to_prep_layers(ideal_prep, prep_ops_to_compose)\n\n # Povm logic\n if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0] # HACK to support multiple vals\n if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):\n ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)\n\n povm_ops_to_compose = []\n if ideal_povm_type.startswith('lindblad '): # then add a composed exp(errorgen) to computational SPAM\n lndtype = ideal_povm_type[len('lindblad '):]\n\n err_gateNQ = _create_nq_noise(lndtype)\n\n povm_ops_to_compose.append(err_gateNQ.copy()) # .copy() => POVM errors independent\n\n # Add noise\n _add_povm_noise(povm_ops_to_compose)\n\n #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)\n effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm\n _add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)\n\n elif ideal_povm_type.startswith('tensor product '):\n #Note: with \"tensor product <X>\" types, e.g. 
\"tensor product static\", we assume modelnoise specifies just\n # a 1Q noise operation, even when `local_noise=False`\n vectype = ideal_povm_type[len('tensor product '):]\n\n v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')\n ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',\n evotype, state_space=None)\n povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]\n\n # Add noise\n povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)\n if povm_noiseop1Q is not None:\n povm_factors = [_povm.ComposedPOVM(\n (povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')\n for factor in povm_factors]\n\n povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)\n\n else: # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs\n\n vectype = ideal_povm_type\n vecs = [] # all the basis vectors for num_qubits\n for i in range(2**num_qubits):\n v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0\n vecs.append(v)\n\n ideal_povm = _povm.create_from_pure_vectors(\n [(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],\n vectype, 'pp', evotype, state_space=state_space)\n\n # Add noise\n povm_ops_to_compose = []\n _add_povm_noise(povm_ops_to_compose)\n\n # Add final ops to returned dictionaries\n _add_to_povm_layers(ideal_povm, povm_ops_to_compose)\n\n return prep_layers, povm_layers\n\n\ndef _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,\n ideal_gate_type=('static standard', 'static clifford', 'static unitary')):\n \"\"\"\n Construct a dictionary of potentially noisy gates that act only on their target qubits.\n\n These gates are \"local\" because they act only on their intended target qubits. The gates\n consist of an ideal gate (obviously local, and crosstalk free) of the type given by\n `ideal_gate_type` composed with a noise operation given by `modelnoise`, if one exists.\n The returned dictionary contains keys for all the gate names in `processor_spec`. Custom\n gate objects can be given by `custom_gates`, which override the normal gate construction.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor to create gate operations for. This object specifies the\n gate names and unitaries for the processor, among other things.\n\n evotype : Evotype\n Create gate objects with this evolution type.\n\n modelnoise : ModelNoise, optional\n Noise that should be applied after the ideal gates. This noise must\n be *local* to each gate (i.e. acting on its target qubits). See the\n :class:`ModelNoise` object documentation for details regarding how\n to specify different types of noise. If `None`, then no noise is added .\n\n custom_gates : dict, optional\n A dictionary of gate objects that should be placed in the returned\n dictionary in lieu of objects that would normally be constructed.\n Keys are gate names and values are gates.\n\n ideal_gate_type : str or tuple, optional\n A gate type or tuple of gate types (listed in order of priority) which\n is used to construct the ideal gates. A gate type usually specifies the\n Python class that will be created, which determines 1) the parameterization\n of the gate and 2) the class/category of the gate (e.g. 
a :class:`StaticClifford`\n operation has no parameters and is a Clifford operation).\n\n Returns\n -------\n gatedict : dict\n A dictionary mapping gate names to local gate operations.\n \"\"\"\n std_gate_unitaries = _itgs.standard_gatename_unitaries()\n if custom_gates is None: custom_gates = {}\n if modelnoise is None: modelnoise = _OpModelPerOpNoise({})\n\n # All possible entries into the upcoming gate dictionary\n # Not just gatenames as it is possible to override in qubit-specific operations\n all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)\n + list(custom_gates.keys())\n + list(modelnoise.keys()))\n\n # Cache ideal ops to ensure only one copy for each name\n ideal_gates = {}\n ideal_factories = {}\n\n gatedict = _collections.OrderedDict()\n for key in all_keys:\n # Use custom gate directly as error gate\n if key in custom_gates:\n gatedict[key] = custom_gates[key]\n continue\n\n # Skip prep, and povm here, just do gates\n if key in ['prep', 'povm']:\n continue\n\n # If key has qubits, get base name for lookup\n label = _label.Label(key)\n name = label.name\n\n U = processor_spec.gate_unitaries[name] # all gate names must be in the processorspec\n if ((name not in processor_spec.nonstd_gate_unitaries)\n or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)\n and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape\n and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):\n stdname = name # setting `stdname` != None means we can try to create a StaticStandardOp below\n else:\n stdname = None\n\n if isinstance(U, (int, _np.int64)): # signals that the gate is an identity on `U` qubits\n ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)\n noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)\n if noiseop is not None:\n gatedict[key] = noiseop\n else:\n gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space) # (identity gate on N qubits)\n\n elif not callable(U): # normal operation (not a factory)\n ideal_gate = ideal_gates.get(name, None)\n if ideal_gate is None:\n ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)\n ideal_gates[name] = ideal_gate\n noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)\n # Note: above line creates a *local* noise op, working entirely in the ideal gate's target space.\n # This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired\n\n if noiseop is None:\n gatedict[key] = ideal_gate\n else:\n if isinstance(noiseop, _op.ComposedOp): # avoid additional nested ComposedOp if we already have one\n noiseop.insert(0, ideal_gate)\n gatedict[key] = noiseop\n else:\n gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])\n\n else: # a factory, given by the unitary-valued function U: args -> unitary\n ideal_factory = ideal_factories.get(name, None)\n if ideal_factory is None:\n local_state_space = _statespace.default_space_for_udim(U.shape[0]) # factory *function* SHAPE\n ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)\n ideal_factories[name] = ideal_factory\n noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)\n gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \\\n if (noiseop is not None) else ideal_factory\n return gatedict\n\n\ndef 
create_crosstalk_free_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto',\n evotype=\"default\", simulator=\"auto\", on_construction_error='raise',\n independent_gates=False, independent_spam=True, ensure_composed_gates=False,\n ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):\n \"\"\"\n Create a n-qubit \"crosstalk-free\" model.\n\n By virtue of being crosstalk-free, this model's operations only\n act nontrivially on their target qubits. Gates consist of an ideal gate\n operation possibly followed by an error operation.\n\n Errors can be specified using any combination of the 4 error rate/coeff arguments,\n but each gate name must be provided exclusively to one type of specification.\n Each specification results in a different type of operation, depending on the parameterization:\n - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)\n - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)\n - `lindblad_error_coeffs` -> exp(LindbladErrorgen)\n\n In addition to the gate names, the special values `\"prep\"` and `\"povm\"` may be\n used as keys to specify the error on the state preparation, measurement, respectively.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict, optional\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. These objects override any other behavior for constructing\n their designated operations. Keys of this dictionary may\n be string-type gate *names* or labels that include target qubits.\n\n depolarization_strengths : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are floats that specify the strength of uniform depolarization.\n\n stochastic_error_probs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are tuples that specify Pauli-stochastic rates for each of the non-trivial\n Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).\n\n lindblad_error_coeffs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are dictionaries corresponding to the `lindblad_term_dict` kwarg taken\n by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`\n tuples, where `termType` can be `\"H\"` (Hamiltonian), `\"S\"`\n (Stochastic), or `\"A\"` (Affine). Hamiltonian and Affine terms always\n have a single basis label (so key is a 2-tuple) whereas Stochastic\n tuples with 1 basis label indicate a *diagonal* term, and are the\n only types of terms allowed when `nonham_mode != \"all\"`. Otherwise,\n Stochastic term tuples can include 2 basis labels to specify\n \"off-diagonal\" non-Hamiltonian Lindblad terms. Basis labels can be\n strings or integers. 
Values are complex coefficients.\n\n depolarization_parameterization : str of {\"depolarize\", \"stochastic\", or \"lindblad\"}\n Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen\n is used to parameterize the depolarization noise, respectively.\n When \"depolarize\" (the default), a DepolarizeOp is created with the strength given\n in `depolarization_strengths`. When \"stochastic\", the depolarization strength is split\n evenly among the stochastic channels of a StochasticOp. When \"lindblad\", the depolarization\n strength is split evenly among the coefficients of the stochastic error generators\n (which are exponentiated to form a LindbladErrorgen with the \"depol\" parameterization).\n\n stochastic_parameterization : str of {\"stochastic\", or \"lindblad\"}\n Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the\n stochastic noise, respectively. When \"stochastic\", elements of `stochastic_error_probs`\n are used as coefficients in a linear combination of stochastic channels (the default).\n When \"lindblad\", the elements of `stochastic_error_probs` are coefficients of\n stochastic error generators (which are exponentiated to form a LindbladErrorgen with the\n \"cptp\" parameterization).\n\n lindblad_parameterization : \"auto\" or a LindbladErrorgen paramtype\n Determines the parameterization of the LindbladErrorgen. When \"auto\" (the default), the parameterization\n is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.\n When not \"auto\", the parameterization type is passed through to the LindbladErrorgen.\n\n evotype : Evotype or str, optional\n The evolution type. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The simulator used to compute predicted probabilities for the\n resulting :class:`Model`. Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n on_construction_error : {'raise','warn',ignore'}\n What to do when the creation of a gate with the given\n `parameterization` fails. Usually you'll want to `\"raise\"` the error.\n In some cases, for example when converting as many gates as you can\n into `parameterization=\"clifford\"` gates, `\"warn\"` or even `\"ignore\"`\n may be useful.\n\n independent_gates : bool, optional\n Whether gates are allowed independent local noise or not. If False,\n then all gates with the same name (e.g. \"Gx\") will have the *same*\n (local) noise (e.g. an overrotation by 1 degree), and the\n `operation_bks['gates']` dictionary contains a single key per gate\n name. If True, then gates with the same name acting on different\n qubits may have different local noise, and so the\n `operation_bks['gates']` dictionary contains a key for each gate\n available gate placement.\n\n ensure_composed_gates : bool, optional\n If True then the elements of the `operation_bks['gates']` will always\n be :class:`ComposedOp` objects. The purpose of this is to\n facilitate modifying the gate operations after the model is created.\n If False, then the appropriately parameterized gate objects (often\n dense gates) are used directly.\n\n ideal_gate_type : str or tuple, optional\n A gate type or tuple of gate types (listed in order of priority) which\n is used to construct the ideal gates. 
A gate type usually specifies the\n Python class that will be created, which determines 1) the parameterization\n of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`\n operation has no parameters and is a Clifford operation).\n\n ideal_spam_type : str or tuple, optional\n Similar to `ideal_gate_type` but for SPAM elements (state preparations\n and POVMs).\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idel operations are added implicitly within the created model. `\"none\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n Returns\n -------\n LocalNoiseModel\n A model with `\"rho0\"` prep, `\"Mdefault\"` POVM, and gates labeled by\n the gate names and qubit labels (as specified by `processor_spec`).\n For instance, the operation label for the `\"Gx\"` gate on the second\n qubit might be `Label(\"Gx\",1)`.\n \"\"\"\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=False)\n\n return _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, on_construction_error, independent_gates, independent_spam,\n ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,\n implicit_idle_mode)\n\n\ndef _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype=\"default\", simulator=\"auto\",\n on_construction_error='raise', independent_gates=False, independent_spam=True,\n ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',\n ideal_povm_type='auto', implicit_idle_mode='none'):\n \"\"\"\n Create a n-qubit \"crosstalk-free\" model.\n\n Similar to :method:`create_crosstalk_free_model` but the noise is input more generally,\n as a :class:`ModelNoise` object. 
Arguments are the same as this function except that\n `modelnoise` is given instead of several more specific noise-describing arguments.\n\n Returns\n -------\n LocalNoiseModel\n \"\"\"\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels)\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n\n if ideal_gate_type == \"auto\":\n ideal_gate_type = ('static standard', 'static clifford', 'static unitary')\n if ideal_prep_type == \"auto\":\n ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)\n if ideal_povm_type == \"auto\":\n ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)\n\n gatedict = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)\n\n # (Note: global idle is now handled through processor-spec processing)\n\n # SPAM:\n local_noise = True\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n ideal_prep_type, ideal_povm_type, evotype,\n state_space, independent_gates, independent_spam)\n\n modelnoise.warn_about_zero_counters()\n return _LocalNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,\n evotype, simulator, on_construction_error,\n independent_gates, ensure_composed_gates,\n implicit_idle_mode)\n\n\ndef create_cloud_crosstalk_model(processor_spec, custom_gates=None,\n depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,\n depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',\n lindblad_parameterization='auto', evotype=\"default\", simulator=\"auto\",\n independent_gates=False, independent_spam=True, errcomp_type=\"gates\",\n implicit_idle_mode=\"none\", verbosity=0):\n \"\"\"\n Create a n-qubit \"cloud-crosstalk\" model.\n\n In a cloud crosstalk model, gates consist of a (local) ideal gates followed\n by an error operation that can act nontrivially on *any* of the processor's qubits\n (not just a gate's target qubits). Typically a gate's errors are specified\n relative to the gate's target qubits, forming a \"cloud\" of errors around the\n target qubits using some notion of locality (that may not be spatial, e.g.\n local in frequency). Currently, the \"ideal\" portion of each gate can only be\n created as a *static* (parameterless) object -- all gate parameters come from\n the error operation.\n\n Errors can be specified using any combination of the 4 error rate/coeff arguments,\n but each gate name must be provided exclusively to one type of specification.\n Each specification results in a different type of operation, depending on the parameterization:\n - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)\n - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)\n - `lindblad_error_coeffs` -> exp(LindbladErrorgen)\n\n In addition to the gate names, the special values `\"prep\"` and `\"povm\"` may be\n used as keys to specify the error on the state preparation, measurement, respectively.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict, optional\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. 
These objects override any other behavior for constructing\n their designated operations. Keys of this dictionary may\n be string-type gate *names* or labels that include target qubits.\n\n depolarization_strengths : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are floats that specify the strength of uniform depolarization.\n\n stochastic_error_probs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are tuples that specify Pauli-stochastic rates for each of the non-trivial\n Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).\n\n lindblad_error_coeffs : dict, optional\n A dictionary whose keys are gate names (e.g. `\"Gx\"`) and whose values\n are dictionaries corresponding to the `lindblad_term_dict` kwarg taken\n by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`\n tuples, where `termType` can be `\"H\"` (Hamiltonian), `\"S\"`\n (Stochastic), or `\"A\"` (Affine). Hamiltonian and Affine terms always\n have a single basis label (so key is a 2-tuple) whereas Stochastic\n tuples with 1 basis label indicate a *diagonal* term, and are the\n only types of terms allowed when `nonham_mode != \"all\"`. Otherwise,\n Stochastic term tuples can include 2 basis labels to specify\n \"off-diagonal\" non-Hamiltonian Lindblad terms. Basis labels can be\n strings or integers. Values are complex coefficients.\n\n depolarization_parameterization : str of {\"depolarize\", \"stochastic\", or \"lindblad\"}\n Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen\n is used to parameterize the depolarization noise, respectively.\n When \"depolarize\" (the default), a DepolarizeOp is created with the strength given\n in `depolarization_strengths`. When \"stochastic\", the depolarization strength is split\n evenly among the stochastic channels of a StochasticOp. When \"lindblad\", the depolarization\n strength is split evenly among the coefficients of the stochastic error generators\n (which are exponentiated to form a LindbladErrorgen with the \"depol\" parameterization).\n\n stochastic_parameterization : str of {\"stochastic\", or \"lindblad\"}\n Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the\n stochastic noise, respectively. When \"stochastic\", elements of `stochastic_error_probs`\n are used as coefficients in a linear combination of stochastic channels (the default).\n When \"lindblad\", the elements of `stochastic_error_probs` are coefficients of\n stochastic error generators (which are exponentiated to form a LindbladErrorgen with the\n \"cptp\" parameterization).\n\n lindblad_parameterization : \"auto\" or a LindbladErrorgen paramtype\n Determines the parameterization of the LindbladErrorgen. When \"auto\" (the default), the parameterization\n is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.\n When not \"auto\", the parameterization type is passed through to the LindbladErrorgen.\n\n evotype : Evotype or str, optional\n The evolution type. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The simulator used to compute predicted probabilities for the\n resulting :class:`Model`. 
Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n independent_gates : bool, optional\n Whether gates are allowed independent noise or not. If False,\n then all gates with the same name (e.g. \"Gx\") will have the *same*\n noise (e.g. an overrotation by 1 degree), and the\n `operation_bks['cloudnoise']` dictionary will contains a single key per gate\n name. If True, then gates with the same name acting on different\n qubits may have different local noise, and so the\n `operation_bks['cloudnoise']` dictionary contains a key for each gate\n available gate placement.\n\n independent_spam : bool, optional\n Similar to `indepenent_gates` but for SPAM operations.\n\n errcomp_type : {'gates', 'errorgens'}\n Whether errors should be combined by composing error maps (`gates`) or by\n exponentiating the sum of error generators (composing the error generators,\n `errorgens`). The latter is only an option when the noise is given solely\n in terms of Lindblad error coefficients.\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idel operations are added implicitly within the created model. `\"none\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n verbosity : int or VerbosityPrinter, optional\n Amount of detail to print to stdout.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n\n modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization,\n lindblad_parameterization, allow_nonlocal=True)\n\n return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates, evotype,\n simulator, independent_gates, independent_spam, errcomp_type,\n implicit_idle_mode, verbosity)\n\n\ndef _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,\n evotype=\"default\", simulator=\"auto\", independent_gates=False,\n independent_spam=True, errcomp_type=\"errorgens\",\n implicit_idle_mode=\"none\", verbosity=0):\n \"\"\"\n Create a n-qubit \"cloud-crosstalk\" model.\n\n Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally,\n as a :class:`ModelNoise` object. 
Arguments are the same as this function except that\n `modelnoise` is given instead of several more specific noise-describing arguments.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n qubit_labels = processor_spec.qubit_labels\n state_space = _statespace.QubitSpace(qubit_labels) # FUTURE: allow other types of state spaces somehow?\n evotype = _Evotype.cast(evotype)\n modelnoise = _OpModelNoise.cast(modelnoise)\n modelnoise.reset_access_counters()\n printer = _VerbosityPrinter.create_printer(verbosity)\n\n #Create static ideal gates without any noise (we use `modelnoise` further down)\n gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,\n ideal_gate_type=('static standard', 'static clifford', 'static unitary'))\n stencils = _collections.OrderedDict()\n\n # (Note: global idle is now processed with other processorspec gates)\n\n # SPAM\n local_noise = False\n prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,\n 'computational', 'computational', evotype, state_space,\n independent_gates, independent_spam)\n\n if errcomp_type == 'gates':\n create_stencil_fn = modelnoise.create_errormap_stencil\n apply_stencil_fn = modelnoise.apply_errormap_stencil\n elif errcomp_type == 'errorgens':\n create_stencil_fn = modelnoise.create_errorgen_stencil\n apply_stencil_fn = modelnoise.apply_errorgen_stencil\n else:\n raise ValueError(\"Invalid `errcomp_type` value: %s\" % str(errcomp_type))\n\n def build_cloudnoise_fn(lbl):\n # lbl will be for a particular gate and target qubits. If we have error rates for this specific gate\n # and target qubits (i.e this primitive layer op) then we should build it directly (and independently,\n # regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil\n # for this gate, then we should use it to construct the output, using a copy when gates are independent\n # and a reference to the *same* stencil operations when `independent_gates==False`.\n\n num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None\n if lbl in modelnoise:\n stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)\n elif lbl.name in stencils:\n stencil = stencils[lbl.name]\n elif lbl.name in modelnoise:\n stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)\n stencil = stencils[lbl.name]\n else:\n return None # no cloudnoise error for this label\n\n return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,\n qubit_graph=processor_spec.qubit_graph,\n copy=independent_gates and (lbl not in modelnoise)) # no need to copy if first case\n\n def build_cloudkey_fn(lbl):\n num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None\n if lbl in modelnoise:\n stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)\n elif lbl.name in stencils:\n stencil = stencils[lbl.name]\n elif lbl.name in modelnoise:\n stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)\n stencil = stencils[lbl.name]\n else:\n # simple cloud-key when there is no cloud noise\n return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels\n\n #Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error\n # touches and form this into a key\n cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,\n processor_spec.qubit_graph)\n hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is 
not None) else qubit_labels\n cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls))) # (sets are unhashable)\n return cloud_key\n\n ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,\n build_cloudnoise_fn, build_cloudkey_fn,\n simulator, evotype, errcomp_type,\n implicit_idle_mode, printer)\n modelnoise.warn_about_zero_counters() # must do this after model creation so build_ fns have been run\n return ret\n\n\ndef create_cloud_crosstalk_model_from_hops_and_weights(\n processor_spec, custom_gates=None,\n max_idle_weight=1, max_spam_weight=1,\n maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,\n simulator=\"auto\", evotype='default',\n gate_type=\"H+S\", spam_type=\"H+S\",\n implicit_idle_mode=\"none\", errcomp_type=\"gates\",\n independent_gates=True, independent_spam=True,\n connected_highweight_errors=True,\n verbosity=0):\n \"\"\"\n Create a \"cloud crosstalk\" model based on maximum error weights and hops along the processor's qubit graph.\n\n This function provides a convenient way to construct cloud crosstalk models whose gate errors\n consist of Pauli elementary error generators (i.e. that correspond to Lindblad error coefficients)\n that are limited in weight (number of non-identity Paulis) and support (which qubits have non-trivial\n Paulis on them). Errors are taken to be approximately local, meaning they are concentrated near the\n target qubits of a gate, with the notion of locality taken from the processor specification's qubit graph.\n The caller provides maximum-weight, maximum-hop (a \"hop\" is the movement along a single graph edge), and\n gate type arguments to specify the set of possible errors on a gate.\n\n - The global idle gate (corresponding to an empty circuit layer) has errors that are limited only by\n a maximum weight, `max_idle_weight`.\n - State preparation and POVM errors are constructed similarly, with a global-idle-like error following\n or preceding the preparation or measurement, respectively.\n - Gate errors are placed on all the qubits that can be reached with at most `maxhops` hops from (any of)\n the gate's target qubits. Elementary error generators up to weight `W`, where `W` equals the number\n of target qubits (e.g., 2 for a CNOT gate) plus `extra_gate_weight` are allowed. Weight-1 terms\n are a special case, and the `extra_weight_1_hops` argument adds to the usual `maxhops` in this case\n to allow weight-1 errors on a possibly larger region of qubits around the target qubits.\n\n Parameters\n ----------\n processor_spec : ProcessorSpec\n The processor specification to create a model for. This object specifies the\n gate names and unitaries for the processor, and their availability on the\n processor.\n\n custom_gates : dict\n A dictionary that associates with gate labels\n :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`\n objects. These objects describe the full action of the gate or\n primitive-layer they're labeled by (so if the model represents\n states by density matrices these objects are superoperators, not\n unitaries), and override any standard construction based on builtin\n gate names or `nonstd_gate_unitaries`. Keys of this dictionary must\n be string-type gate *names* -- they cannot include state space labels\n -- and they must be *static* (have zero parameters) because they\n represent only the ideal behavior of each gate -- the cloudnoise\n operations represent the parameterized noise. 
To fine-tune how this\n noise is parameterized, call the :class:`CloudNoiseModel` constructor\n directly.\n\n max_idle_weight : int, optional\n The maximum-weight for errors on the global idle gate.\n\n max_spam_weight : int, optional\n The maximum-weight for state preparation and measurement (SPAM) errors.\n\n maxhops : int\n The locality constraint: for a gate, errors (of weight up to the\n maximum weight for the gate) are allowed to occur on the gate's\n target qubits and those reachable by hopping at most `maxhops` times\n from a target qubit along nearest-neighbor links (defined by the\n `geometry`).\n\n extra_weight_1_hops : int, optional\n Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0\n can be useful for allowing just weight-1 errors (of which there are\n relatively few) to be dispersed farther from a gate's target qubits.\n For example, a crosstalk-detecting model might use this.\n\n extra_gate_weight : int, optional\n Addtional weight, beyond the number of target qubits (taken as a \"base\n weight\" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If\n this equals 1, for instance, then 1-qubit gates can have up to weight-2\n errors and 2-qubit gates can have up to weight-3 errors.\n\n simulator : ForwardSimulator or {\"auto\", \"matrix\", \"map\"}\n The circuit simulator used to compute any\n requested probabilities, e.g. from :method:`probs` or\n :method:`bulk_probs`. Using `\"auto\"` selects `\"matrix\"` when there\n are 2 qubits or less, and otherwise selects `\"map\"`.\n\n evotype : Evotype or str, optional\n The evolution type of this model, describing how states are\n represented. The special value `\"default\"` is equivalent\n to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.\n\n gate_type : str, optional\n The Lindblad-error parameterization type used for gate operations. This\n may be expanded in the future, but currently the gate errors *must* be of\n the Lindblad error-generator coefficients type, and this argument specifies\n what elementary error-generator coefficients are initially allowed (and linked to\n model parameters), before maximum-weight and locality constraints are imposed.\n In addition to the usual Lindblad error types, (e.g. `\"H\"`, `\"H+S\"`) the special\n values `\"none\"` is allowed to indicate that there should be no errors on the gates\n (useful if you only want errors on the SPAM, for instance).\n\n spam_type : str, optional\n Similar to `gate_type` but for SPAM elements (state preparations\n and POVMs). This specifies the Lindblad-error parameterization for the\n state prepearation and POVM.\n\n implicit_idle_mode : {'none', 'add_global'}\n The way idel operations are added implicitly within the created model. `\"nonw\"`\n doesn't add any \"extra\" idle operations when there is a layer that contains some\n gates but not gates on all the qubits. `\"add_global\"` adds the global idle operation,\n i.e., the operation for a global idle layer (zero gates - a completely empty layer),\n to every layer that is simulated, using the global idle as a background idle that always\n occurs regardless of the operation.\n\n errcomp_type : {\"gates\",\"errorgens\"}\n How errors are composed when creating layer operations in the created\n model. `\"gates\"` means that the errors on multiple gates in a single\n layer are composed as separate and subsequent processes. 
Specifically,\n the layer operation has the form `Composed(target,idleErr,cloudErr)`\n where `target` is a composition of all the ideal gate operations in the\n layer, `idleErr` is the global idle error if `implicit_idle_mode == 'add_global'`,\n and `cloudErr` is the composition (ordered as layer-label) of cloud-\n noise contributions, i.e. a map that acts as the product of exponentiated\n error-generator matrices. `\"errorgens\"` means that layer operations\n have the form `Composed(target, error)` where `target` is as above and\n `error` results from composing (summing) the idle and cloud-noise error\n *generators*, i.e. a map that acts as the exponentiated sum of error\n generators (ordering is irrelevant in this case).\n\n independent_gates : bool, optional\n Whether the noise added to a gate when it acts on one set of target\n qubits is independent of its noise on a different set of target qubits.\n If False, then all gates with the same name (e.g. \"Gx\") will be constrained\n to having the *same* noise on the cloud around the target qubits (even though\n the target qubits and cloud are different). If True, then gate noise operations\n for different sets of target qubits are independent.\n\n independent_spam : bool, optional\n Similar to `independent_gates` but for state preparation and measurement operations.\n When `False`, the noise applied to each set (individual or pair or triple etc.) of\n qubits must be the same, e.g., if the state preparation is a perfect preparation followed\n by a single-qubit rotation then this rotation must be by the *same* angle on all of\n the qubits.\n\n connected_highweight_errors : bool, optional\n An additional constraint regarding high-weight errors. When `True`, only high weight\n (weight 2+) elementary error generators whose non-trivial Paulis occupy a *connected*\n portion of the qubit graph are allowed. For example, if the qubit graph is a 1D chain\n of 4 qubits, 1-2-3-4, and weight-2 errors are allowed on a single-qubit gate with\n target = qubit-2, then weight-2 errors on 1-2 and 2-3 would be allowed, but errors on\n 1-3 would be forbidden. 
When `False`, no constraint is imposed.\n\n verbosity : int or VerbosityPrinter, optional\n An integer >= 0 dictating how must output to send to stdout.\n\n Returns\n -------\n CloudNoiseModel\n \"\"\"\n\n # construct noise specifications for the cloudnoise model\n modelnoise = {}\n all_qubit_labels = processor_spec.qubit_labels\n conn = connected_highweight_errors # shorthand: whether high-weight errors must be connected on the graph\n global_idle_name = processor_spec.global_idle_gate_name\n\n # Global Idle\n if max_idle_weight > 0:\n assert(global_idle_name is not None), \\\n \"`max_idle_weight` must equal 0 for processor specs without a global idle gate!\"\n #printer.log(\"Creating Idle:\")\n wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]\n modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,\n gate_type, conn)\n\n # SPAM\n if max_spam_weight > 0:\n wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]\n modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)\n modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)\n\n # Gates\n weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \\\n [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \\\n [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n for gatenm, gate_unitary in processor_spec.gate_unitaries.items():\n if gatenm == global_idle_name: continue # processed above\n gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \\\n else int(round(_np.log2(gate_unitary.shape[0]))) # NOTE: integer gate_unitary => idle on n qubits\n if gate_nQubits not in (1, 2):\n raise ValueError(\"Only 1- and 2-qubit gates are supported. 
%s acts on %d qubits!\"\n % (str(gatenm), gate_nQubits))\n weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q\n target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')\n modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,\n gate_type, conn)\n\n return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,\n evotype, simulator, independent_gates, independent_spam,\n errcomp_type, implicit_idle_mode, verbosity)\n\n\ndef _iter_basis_inds(weight):\n \"\"\" Iterate over product of `weight` non-identity Pauli 1Q basis indices \"\"\"\n basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els\n for basisInds in _itertools.product(*basisIndList):\n yield basisInds\n\n\ndef _pauli_product_matrix(sigma_inds):\n \"\"\"\n Construct the Pauli product matrix from the given `sigma_inds`\n\n Parameters\n ----------\n sigma_inds : iterable\n A sequence of integers in the range [0,3] corresponding to the\n I, X, Y, Z Pauli basis matrices.\n\n Returns\n -------\n numpy.ndarray or scipy.sparse.csr_matrix\n \"\"\"\n sigmaVec = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)\n M = _np.identity(1, 'complex')\n for i in sigma_inds:\n M = _np.kron(M, sigmaVec[i])\n return M\n\n\ndef _construct_restricted_weight_pauli_basis(wt, sparse=False):\n basisEl_Id = _pauli_product_matrix(_np.zeros(wt, _np.int64))\n errbasis = [basisEl_Id]\n errbasis_lbls = ['I']\n for err_basis_inds in _iter_basis_inds(wt):\n error = _np.array(err_basis_inds, _np.int64) # length == wt\n basisEl = _pauli_product_matrix(error)\n errbasis.append(basisEl)\n errbasis_lbls.append(''.join([\"IXYZ\"[i] for i in err_basis_inds]))\n\n #printer.log(\"Error on qubits %s -> error basis of length %d\" % (err_qubit_inds, len(errbasis)), 3)\n return _ExplicitBasis(errbasis, errbasis_lbls, real=True, sparse=sparse)\n\n\ndef _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):\n\n # This function:\n # loop over all size-`wt` *connected* combinations, `err_qubit_inds`, of the qubit indices in\n # `possible_err_qubit_inds`\n # - construct a local weight-`wt` Pauli basis & corresponding LindbladErrorgen on `wt` qubits\n # => replace with: opnoise.create_errorgen(evotype, state_space=None) where opnoise is for a wt-qubit op\n # - embed this constructed local error onto `err_qubit_inds`\n # - append embedded error onto running list\n #\n # Noise object structure:\n # OpModelPerOpNoise( { op_key/'idle': { sslbls : opnoise } } )\n # where sslbls can be absolute labels or stencil labels\n # -- could have a fn that spreads a single opnoise onto all the sslbls\n # given by size-`wt` connected combos of `possible_err_qubit_inds` - this would work for independent clouds\n # -- have LindbladNoiseDict and another LindbladPauliAtWeight (?) 
noise objects,\n # since we want to specify a lindblad noise by giving a weight and an initial basis (Pauli here)\n\n # To build a cloudnoise model from hops & weights:\n modelnoise_dict = {}\n if lnd_parameterization == 'none' or lnd_parameterization is None:\n return {} # special case when we don't want any error parameterization\n\n for wt, max_hops in weight_maxhops_tuples:\n if max_hops is None or max_hops == 0: # Note: maxHops not used in this case\n stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)\n else:\n stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)\n\n local_state_space = _statespace.default_space_for_num_qubits(wt)\n modelnoise_dict[stencil_lbl] = _LindbladNoise.from_basis_coefficients(\n lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),\n local_state_space)\n return modelnoise_dict\n\n\ndef _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,\n depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,\n allow_nonlocal):\n\n modelnoises = []\n if depolarization_strengths is not None:\n noise_dict = {}\n for lbl, val in depolarization_strengths.items():\n if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications\n if not allow_nonlocal: raise ValueError(\"Nonlocal depolarization strengths not allowed!\")\n noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}\n else:\n noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)\n modelnoises.append(_OpModelPerOpNoise(noise_dict))\n\n if stochastic_error_probs is not None:\n noise_dict = {}\n for lbl, val in stochastic_error_probs.items():\n if isinstance(val, dict): # then value is actually a dictionary of sslbls -> noise specifications\n if not allow_nonlocal: raise ValueError(\"Nonlocal stochastic error probs not allowed!\")\n noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}\n else:\n noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)\n modelnoises.append(_OpModelPerOpNoise(noise_dict))\n\n if lindblad_error_coeffs is not None:\n\n if not allow_nonlocal: # the easy case\n modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)\n for lbl, val in lindblad_error_coeffs.items()}))\n else: # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'\n def process_stencil_labels(flat_lindblad_errs):\n nonlocal_errors = _collections.OrderedDict()\n local_errors = _collections.OrderedDict()\n\n for nm, val in flat_lindblad_errs.items():\n if isinstance(nm, str): nm = (nm[0], nm[1:]) # e.g. \"HXX\" => ('H','XX')\n err_typ, basisEls = nm[0], nm[1:]\n sslbls = None\n local_nm = [err_typ]\n for bel in basisEls: # e.g. bel could be \"X:Q0\" or \"XX:Q0,Q1\"\n # OR \"X:<n>\" where n indexes a target qubit or \"X:<dir>\" where dir indicates\n # a graph *direction*, e.g. \"up\"\n if ':' in bel:\n bel_name, bel_sslbls = bel.split(':') # should have form <name>:<comma-separated-sslbls>\n bel_sslbls = bel_sslbls.split(',') # e.g. 
('Q0','Q1')\n integerized_sslbls = []\n for ssl in bel_sslbls:\n try: integerized_sslbls.append(int(ssl))\n except: integerized_sslbls.append(ssl)\n bel_sslbls = tuple(integerized_sslbls)\n else:\n bel_name = bel\n bel_sslbls = None\n\n if sslbls is None:\n sslbls = bel_sslbls\n else:\n #Note: sslbls should always be the same if there are multiple basisEls,\n # i.e for nm == ('S',bel1,bel2)\n assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \\\n \"All basis elements of the same error term must operate on the *same* state!\"\n local_nm.append(bel_name) # drop the state space labels, e.g. \"XY:Q0,Q1\" => \"XY\"\n\n # keep track of errors by the qubits they act on, as only each such\n # set will have it's own LindbladErrorgen\n local_nm = tuple(local_nm) # so it's hashable\n if sslbls is not None:\n sslbls = tuple(sorted(sslbls))\n if sslbls not in nonlocal_errors:\n nonlocal_errors[sslbls] = _collections.OrderedDict()\n if local_nm in nonlocal_errors[sslbls]:\n nonlocal_errors[sslbls][local_nm] += val\n else:\n nonlocal_errors[sslbls][local_nm] = val\n else:\n if local_nm in local_errors:\n local_errors[local_nm] += val\n else:\n local_errors[local_nm] = val\n\n if len(nonlocal_errors) == 0:\n return _LindbladNoise(local_errors, lindblad_parameterization)\n else:\n all_errors = []\n if len(local_errors) > 0:\n all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))\n for sslbls, errdict in nonlocal_errors.items():\n all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))\n return _collections.OrderedDict(all_errors)\n\n modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)\n for lbl, val in lindblad_error_coeffs.items()}))\n\n return _ComposedOpModelNoise(modelnoises)\n\n\n@_deprecated_fn(\"This function is overly specific and will be removed soon.\")\ndef _nparams_xycnot_cloudnoise_model(num_qubits, geometry=\"line\", max_idle_weight=1, maxhops=0,\n extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,\n independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):\n \"\"\"\n Compute the number of parameters in a particular :class:`CloudNoiseModel`.\n\n Returns the number of parameters in the :class:`CloudNoiseModel` containing\n X(pi/2), Y(pi/2) and CNOT gates using the specified arguments without\n actually constructing the model (useful for considering parameter-count\n scaling).\n\n Parameters\n ----------\n num_qubits : int\n The total number of qubits.\n\n geometry : {\"line\",\"ring\",\"grid\",\"torus\"} or QubitGraph\n The type of connectivity among the qubits, specifying a\n graph used to define neighbor relationships. Alternatively,\n a :class:`QubitGraph` object may be passed directly.\n\n max_idle_weight : int, optional\n The maximum-weight for errors on the global idle gate.\n\n maxhops : int\n The locality constraint: for a gate, errors (of weight up to the\n maximum weight for the gate) are allowed to occur on the gate's\n target qubits and those reachable by hopping at most `maxhops` times\n from a target qubit along nearest-neighbor links (defined by the\n `geometry`).\n\n extra_weight_1_hops : int, optional\n Additional hops (adds to `maxhops`) for weight-1 errors. 
A value > 0\n can be useful for allowing just weight-1 errors (of which there are\n relatively few) to be dispersed farther from a gate's target qubits.\n For example, a crosstalk-detecting model might use this.\n\n extra_gate_weight : int, optional\n Addtional weight, beyond the number of target qubits (taken as a \"base\n weight\" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If\n this equals 1, for instance, then 1-qubit gates can have up to weight-2\n errors and 2-qubit gates can have up to weight-3 errors.\n\n require_connected : bool, optional\n If True, then high-weight errors only occur on connected (via `geometry`) qubits.\n For example in a line of qubits there would not be weight-2 errors on qubits 1 and 3.\n\n independent_1q_gates : bool, optional\n If True, 1Q gates on different qubits have separate (distinct) parameters. If\n False, the 1Q gates of each type (e.g. an pi/2 X gate) for different qubits share\n the same set of parameters.\n\n zz_only : bool, optional\n If True, the only high-weight errors allowed are of \"Z^n\" type.\n\n bidirectional_cnots : bool\n Whether CNOT gates can be performed in either direction (and each direction should\n be treated as an indepedent gate)\n\n verbosity : int, optional\n An integer >= 0 dictating how much output to send to stdout.\n\n Returns\n -------\n int\n \"\"\"\n # noise can be either a seed or a random array that is long enough to use\n\n printer = _VerbosityPrinter.create_printer(verbosity)\n printer.log(\"Computing parameters for a %d-qubit %s model\" % (num_qubits, geometry))\n\n qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)\n #printer.log(\"Created qubit graph:\\n\"+str(qubitGraph))\n\n def idle_count_nparams(max_weight):\n \"\"\"Parameter count of a `build_nqn_global_idle`-constructed gate\"\"\"\n ret = 0\n possible_err_qubit_inds = _np.arange(num_qubits)\n for wt in range(1, max_weight + 1):\n nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)\n if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)\n else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt\n nErrParams = 2 * basisSizeWoutId # H+S terms\n ret += nErrTargetLocations * nErrParams\n return ret\n\n def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):\n \"\"\"Parameter count of a `build_nqn_composed_gate`-constructed gate\"\"\"\n ret = 0\n #Note: no contrib from idle noise (already parameterized)\n for wt, maxHops in weight_maxhops_tuples:\n possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)\n if require_connected:\n nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)\n else:\n nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)\n if zz_only and wt > 1: basisSizeWoutId = 1**wt # ( == 1)\n else: basisSizeWoutId = 3**wt # (X,Y,Z)^wt\n nErrParams = 2 * basisSizeWoutId # H+S terms\n if debug:\n print(\" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d\" %\n (wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,\n nErrParams, nErrTargetLocations * nErrParams))\n ret += nErrTargetLocations * nErrParams\n return ret\n\n nParams = _collections.OrderedDict()\n\n printer.log(\"Creating Idle:\")\n nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)\n\n #1Q gates: X(pi/2) & Y(pi/2) on each qubit\n weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \\\n [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n\n if 
independent_1q_gates:\n for i in range(num_qubits):\n printer.log(\"Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!\" % i)\n nParams[_label.Label(\"Gx\", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)\n nParams[_label.Label(\"Gy\", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)\n else:\n printer.log(\"Creating common 1Q X(pi/2) and Y(pi/2) gates\")\n rep = int(num_qubits / 2)\n nParams[_label.Label(\"Gxrep\")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)\n nParams[_label.Label(\"Gyrep\")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)\n\n #2Q gates: CNOT gates along each graph edge\n weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \\\n [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]\n seen_pairs = set()\n for i, j in qubitGraph.edges(): # note: all edges have i<j so \"control\" of CNOT is always lower index (arbitrary)\n if bidirectional_cnots is False:\n ordered_tup = (i, j) if i <= j else (j, i)\n if ordered_tup in seen_pairs: continue\n else: seen_pairs.add(ordered_tup)\n\n printer.log(\"Creating CNOT gate between qubits %d and %d!!\" % (i, j))\n nParams[_label.Label(\"Gcnot\", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)\n\n #SPAM\n nPOVM_1Q = 4 # params for a single 1Q POVM\n nParams[_label.Label('rho0')] = 3 * num_qubits # 3 b/c each component is TP\n nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits # num_qubits 1Q-POVMs\n\n return nParams, sum(nParams.values())\n", "\"\"\"\nClasses corresponding to tables within a Workspace context.\n\"\"\"\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. 
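# --- Editor's usage sketch (not part of the pyGSTi source above) ---
# A minimal example of the two construction entry points defined in this module,
# create_crosstalk_free_model and create_cloud_crosstalk_model_from_hops_and_weights.
# Assumptions not taken from this file: the QubitProcessorSpec constructor form and
# the standard gate names 'Gxpi2', 'Gypi2', 'Gcnot'; all error rates are illustrative.
from pygsti.processors import QubitProcessorSpec
from pygsti.models import modelconstruction as mc

# 2-qubit processor on a line, with standard 1Q pi/2 rotations and CNOT
pspec = QubitProcessorSpec(num_qubits=2, gate_names=['Gxpi2', 'Gypi2', 'Gcnot'],
                           geometry='line')

# Crosstalk-free model: each gate gets local depolarization, and the state
# preparation gets a small Hamiltonian (X) Lindblad error via the special 'prep' key.
mdl_local = mc.create_crosstalk_free_model(
    pspec,
    depolarization_strengths={'Gxpi2': 0.01, 'Gypi2': 0.01, 'Gcnot': 0.02},
    lindblad_error_coeffs={'prep': {('H', 'X'): 0.005}})

# Cloud-crosstalk model from weight/hop limits: H+S errors up to the gate's base
# weight, allowed within one hop of each gate's target qubits on the line graph.
# max_idle_weight=0 because this processor spec declares no global idle gate.
mdl_cloud = mc.create_cloud_crosstalk_model_from_hops_and_weights(
    pspec, max_idle_weight=0, max_spam_weight=1, maxhops=1,
    gate_type='H+S', spam_type='H+S')

print(mdl_local.num_params, mdl_cloud.num_params)
# --- end editor's sketch ---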
You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport warnings as _warnings\n\nimport numpy as _np\n\nfrom pygsti.report import plothelpers as _ph\nfrom pygsti.report import reportables as _reportables\nfrom pygsti.report import workspaceplots as _wp\nfrom pygsti.report.reportables import evaluate as _ev\nfrom pygsti.report.table import ReportTable as _ReportTable\nfrom pygsti.report.workspace import WorkspaceTable\nfrom pygsti.report.reportableqty import ReportableQty as _ReportableQty, minimum as _rqty_minimum\nfrom pygsti import circuits as _circuits\nfrom pygsti import models as _models\nfrom pygsti import baseobjs as _baseobjs\nfrom pygsti import tools as _tools\nfrom pygsti.algorithms import gaugeopt as _gopt\nfrom pygsti.modelmembers import operations as _op\nfrom pygsti.modelmembers import povms as _povm\nfrom pygsti.modelmembers import states as _state\nfrom pygsti.objectivefns import objectivefns as _objfns\nfrom pygsti.circuits.circuit import Circuit as _Circuit\n\n\nclass BlankTable(WorkspaceTable):\n \"\"\"\n A completely blank placeholder table.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n \"\"\"\n\n def __init__(self, ws):\n \"\"\"A completely blank placeholder table.\"\"\"\n super(BlankTable, self).__init__(ws, self._create)\n\n def _create(self):\n table = _ReportTable(['Blank'], [None])\n table.finish()\n return table\n\n\nclass SpamTable(WorkspaceTable):\n \"\"\"\n A table of one or more model's SPAM elements.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n models : Model or list of Models\n The Model(s) whose SPAM elements should be displayed. If\n multiple Models are given, they should have the same SPAM\n elements..\n\n titles : list of strs, optional\n Titles correponding to elements of `models`, e.g. `\"Target\"`.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the SPAM matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n include_hs_vec : boolean, optional\n Whether or not to include Hilbert-Schmidt\n vector representation columns in the table.\n \"\"\"\n\n def __init__(self, ws, models, titles=None,\n display_as=\"boxes\", confidence_region_info=None,\n include_hs_vec=True):\n \"\"\"\n A table of one or more model's SPAM elements.\n\n Parameters\n ----------\n models : Model or list of Models\n The Model(s) whose SPAM elements should be displayed. If\n multiple Models are given, they should have the same SPAM\n elements..\n\n titles : list of strs, optional\n Titles correponding to elements of `models`, e.g. 
`\"Target\"`.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the SPAM matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n include_hs_vec : boolean, optional\n Whether or not to include Hilbert-Schmidt\n vector representation columns in the table.\n \"\"\"\n super(SpamTable, self).__init__(ws, self._create, models,\n titles, display_as, confidence_region_info,\n include_hs_vec)\n\n def _create(self, models, titles, display_as, confidence_region_info,\n include_hs_vec):\n\n if isinstance(models, _models.Model):\n models = [models]\n\n rhoLabels = list(models[0].preps.keys()) # use labels of 1st model\n povmLabels = list(models[0].povms.keys()) # use labels of 1st model\n\n if titles is None:\n titles = [''] * len(models)\n\n colHeadings = ['Operator']\n for model, title in zip(models, titles):\n colHeadings.append('%sMatrix' % (title + ' ' if title else ''))\n for model, title in zip(models, titles):\n colHeadings.append('%sEigenvals' % (title + ' ' if title else ''))\n\n formatters = [None] * len(colHeadings)\n\n if include_hs_vec:\n model = models[-1] # only show HSVec for last model\n basisNm = _tools.basis_longname(model.basis)\n colHeadings.append('Hilbert-Schmidt vector (%s basis)' % basisNm)\n formatters.append(None)\n\n if confidence_region_info is not None:\n colHeadings.append('%g%% C.I. half-width' % confidence_region_info.level)\n formatters.append('Conversion')\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n for lbl in rhoLabels:\n rowData = [lbl]; rowFormatters = ['Rho']\n\n for model in models:\n rhoMx = _ev(_reportables.Vec_as_stdmx(model, lbl, \"prep\"))\n # confidence_region_info) #don't put CIs on matrices for now\n if display_as == \"numbers\":\n rowData.append(rhoMx)\n rowFormatters.append('Brackets')\n elif display_as == \"boxes\":\n rhoMx_real = rhoMx.hermitian_to_real()\n v = rhoMx_real.value\n fig = _wp.GateMatrixPlot(self.ws, v, colorbar=False,\n box_labels=True, prec='compacthp',\n mx_basis=None) # no basis labels\n rowData.append(fig)\n rowFormatters.append('Figure')\n else:\n raise ValueError(\"Invalid 'display_as' argument: %s\" % display_as)\n\n for model in models:\n cri = confidence_region_info if confidence_region_info and \\\n (confidence_region_info.model.frobeniusdist(model) < 1e-6) else None\n evals = _ev(_reportables.Vec_as_stdmx_eigenvalues(model, lbl, \"prep\"),\n cri)\n rowData.append(evals)\n rowFormatters.append('Brackets')\n\n if include_hs_vec:\n rowData.append(models[-1].preps[lbl])\n rowFormatters.append('Normal')\n\n if confidence_region_info is not None:\n intervalVec = confidence_region_info.retrieve_profile_likelihood_confidence_intervals(lbl)[:, None]\n if intervalVec.shape[0] == models[-1].dim - 1:\n #TP constrained, so pad with zero top row\n intervalVec = _np.concatenate((_np.zeros((1, 1), 'd'), intervalVec), axis=0)\n rowData.append(intervalVec); rowFormatters.append('Normal')\n\n #Note: no dependence on confidence region (yet) when HS vector is not shown...\n table.add_row(rowData, rowFormatters)\n\n for povmlbl in povmLabels:\n for lbl in models[0].povms[povmlbl].keys():\n povmAndELbl = str(povmlbl) + \":\" + lbl # format for ModelFunction objs\n # show POVM name if there's more than one of them\n rowData = [lbl] if 
(len(povmLabels) == 1) else [povmAndELbl]\n rowFormatters = ['Effect']\n\n for model in models:\n EMx = _ev(_reportables.Vec_as_stdmx(model, povmAndELbl, \"effect\"))\n #confidence_region_info) #don't put CIs on matrices for now\n if display_as == \"numbers\":\n rowData.append(EMx)\n rowFormatters.append('Brackets')\n elif display_as == \"boxes\":\n EMx_real = EMx.hermitian_to_real()\n v = EMx_real.value\n fig = _wp.GateMatrixPlot(self.ws, v, colorbar=False,\n box_labels=True, prec='compacthp',\n mx_basis=None) # no basis labels\n rowData.append(fig)\n rowFormatters.append('Figure')\n else:\n raise ValueError(\"Invalid 'display_as' argument: %s\" % display_as) # pragma: no cover\n\n for model in models:\n cri = confidence_region_info if confidence_region_info and \\\n (confidence_region_info.model.frobeniusdist(model) < 1e-6) else None\n evals = _ev(_reportables.Vec_as_stdmx_eigenvalues(model, povmAndELbl, \"effect\"),\n cri)\n rowData.append(evals)\n rowFormatters.append('Brackets')\n\n if include_hs_vec:\n rowData.append(models[-1].povms[povmlbl][lbl])\n rowFormatters.append('Normal')\n\n if confidence_region_info is not None:\n intervalVec = confidence_region_info.retrieve_profile_likelihood_confidence_intervals(povmlbl)[\n :, None] # for all povm params\n intervalVec = intervalVec[models[-1].povms[povmlbl][lbl].gpindices] # specific to this effect\n rowData.append(intervalVec); rowFormatters.append('Normal')\n\n #Note: no dependence on confidence region (yet) when HS vector is not shown...\n table.add_row(rowData, rowFormatters)\n\n table.finish()\n return table\n\n\nclass SpamParametersTable(WorkspaceTable):\n \"\"\"\n A table for \"SPAM parameters\" (dot products of SPAM vectors)\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n models : Model or list of Models\n The Model(s) whose SPAM parameters should be displayed. If\n multiple Models are given, they should have the same gates.\n\n titles : list of strs, optional\n Titles correponding to elements of `models`, e.g. `\"Target\"`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, models, titles=None, confidence_region_info=None):\n \"\"\"\n Create a table for model's \"SPAM parameters\", that is, the\n dot products of prep-vectors and effect-vectors.\n\n Parameters\n ----------\n models : Model or list of Models\n The Model(s) whose SPAM parameters should be displayed. If\n multiple Models are given, they should have the same gates.\n\n titles : list of strs, optional\n Titles correponding to elements of `models`, e.g. 
`\"Target\"`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(SpamParametersTable, self).__init__(ws, self._create, models, titles, confidence_region_info)\n\n def _create(self, models, titles, confidence_region_info):\n\n if isinstance(models, _models.Model):\n models = [models]\n if titles is None:\n titles = [''] * len(models)\n\n if len(models[0].povms) == 1:\n povmKey = list(models[0].povms.keys())[0]\n effectLbls = [eLbl for eLbl in models[0].povms[povmKey]]\n else:\n effectLbls = [povmLbl + \".\" + eLbl\n for povmLbl, povm in models[0].povms.items()\n for eLbl in povm.keys()]\n\n colHeadings = [''] + effectLbls\n formatters = [None] + ['Effect'] * len(effectLbls)\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n for gstitle, model in zip(titles, models):\n cri = confidence_region_info if (confidence_region_info\n and confidence_region_info.model.frobeniusdist(model) < 1e-6) else None\n spamDotProdsQty = _ev(_reportables.Spam_dotprods(model), cri)\n DPs, DPEBs = spamDotProdsQty.value_and_errorbar\n assert(DPs.shape[1] == len(effectLbls)), \\\n \"Models must have the same number of POVMs & effects\"\n\n formatters = ['Rho'] + ['Normal'] * len(effectLbls) # for rows below\n\n for ii, prepLabel in enumerate(model.preps.keys()): # ii enumerates rhoLabels to index DPs\n prefix = gstitle + \" \" if len(gstitle) else \"\"\n rowData = [prefix + str(prepLabel)]\n for jj, _ in enumerate(effectLbls): # jj enumerates eLabels to index DPs\n if cri is None:\n rowData.append((DPs[ii, jj], None))\n else:\n rowData.append((DPs[ii, jj], DPEBs[ii, jj]))\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass GatesTable(WorkspaceTable):\n \"\"\"\n Create a table showing a model's raw gates.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n models : Model or list of Models\n The Model(s) whose gates should be displayed. If multiple\n Models are given, they should have the same operation labels.\n\n titles : list of strings, optional\n A list of titles corresponding to the models, used to\n prefix the column(s) for that model. E.g. `\"Target\"`.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the operation matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals for the *final*\n element of `models`.\n \"\"\"\n\n def __init__(self, ws, models, titles=None, display_as=\"boxes\",\n confidence_region_info=None):\n \"\"\"\n Create a table showing a model's raw gates.\n\n Parameters\n ----------\n models : Model or list of Models\n The Model(s) whose gates should be displayed. If multiple\n Models are given, they should have the same operation labels.\n\n titles : list of strings, optional\n A list of titles corresponding to the models, used to\n prefix the column(s) for that model. E.g. 
`\"Target\"`.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the operation matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals for the *final*\n element of `models`.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GatesTable, self).__init__(ws, self._create, models, titles,\n display_as, confidence_region_info)\n\n def _create(self, models, titles, display_as, confidence_region_info):\n\n if isinstance(models, _models.Model):\n models = [models]\n\n opLabels = models[0].primitive_op_labels # use labels of 1st model\n instLabels = list(models[0].instruments.keys()) # requires an explicit model!\n assert(isinstance(models[0], _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n\n if titles is None:\n titles = [''] * len(models)\n\n colHeadings = ['Gate']\n for model, title in zip(models, titles):\n basisLongNm = _tools.basis_longname(model.basis)\n pre = (title + ' ' if title else '')\n colHeadings.append('%sSuperoperator (%s basis)' % (pre, basisLongNm))\n formatters = [None] * len(colHeadings)\n\n if confidence_region_info is not None:\n #Only use confidence region for the *final* model.\n colHeadings.append('%g%% C.I. half-width' % confidence_region_info.level)\n formatters.append('Conversion')\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n #Create list of labels and gate-like objects, allowing instruments to be included:\n label_op_tups = []\n for gl in opLabels:\n # may want to gracefully handle index error here?\n tup_of_ops = tuple([model.operations[gl] for model in models])\n label_op_tups.append((gl, None, tup_of_ops))\n for il in instLabels:\n for comp_lbl in models[0].instruments[il].keys():\n tup_of_ops = tuple([model.instruments[il][comp_lbl] for model in models]\n ) # may want to gracefully handle index error here?\n label_op_tups.append((il, comp_lbl, tup_of_ops))\n\n for lbl, comp_lbl, per_model_ops in label_op_tups:\n row_data = [lbl if (comp_lbl is None) else (lbl + '.' 
+ comp_lbl)]\n row_formatters = [None]\n\n for model, op in zip(models, per_model_ops):\n basis = model.basis\n\n if display_as == \"numbers\":\n row_data.append(op.to_dense('HilbertSchmidt'))\n row_formatters.append('Brackets')\n elif display_as == \"boxes\":\n fig = _wp.GateMatrixPlot(self.ws, op.to_dense(on_space='HilbertSchmidt'),\n colorbar=False,\n mx_basis=basis)\n\n row_data.append(fig)\n row_formatters.append('Figure')\n else:\n raise ValueError(\"Invalid 'display_as' argument: %s\" % display_as)\n\n if confidence_region_info is not None:\n intervalVec = confidence_region_info.retrieve_profile_likelihood_confidence_intervals(\n lbl, comp_lbl)[:, None]\n\n if isinstance(per_model_ops[-1], _op.FullArbitraryOp):\n #then we know how to reshape into a matrix\n op_dim = models[-1].dim\n basis = models[-1].basis\n intervalMx = intervalVec.reshape(op_dim, op_dim)\n elif isinstance(per_model_ops[-1], _op.FullTPOp):\n #then we know how to reshape into a matrix\n op_dim = models[-1].dim\n basis = models[-1].basis\n intervalMx = _np.concatenate((_np.zeros((1, op_dim), 'd'),\n intervalVec.reshape(op_dim - 1, op_dim)), axis=0)\n else:\n # we don't know how best to reshape interval matrix for gate, so\n # use derivative\n op_dim = models[-1].dim\n basis = models[-1].basis\n op_deriv = per_model_ops[-1].deriv_wrt_params()\n intervalMx = _np.abs(_np.dot(op_deriv, intervalVec).reshape(op_dim, op_dim))\n\n if display_as == \"numbers\":\n row_data.append(intervalMx)\n row_formatters.append('Brackets')\n\n elif display_as == \"boxes\":\n maxAbsVal = _np.max(_np.abs(intervalMx))\n fig = _wp.GateMatrixPlot(self.ws, intervalMx,\n color_min=-maxAbsVal, color_max=maxAbsVal,\n colorbar=False,\n mx_basis=basis)\n row_data.append(fig)\n row_formatters.append('Figure')\n else:\n assert(False) # pragma: no cover\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass ChoiTable(WorkspaceTable):\n \"\"\"\n A table of the Choi representations of a Model's gates\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n models : Model or list of Models\n The Model(s) whose Choi info should be displayed. If multiple\n Models are given, they should have the same operation labels.\n\n titles : list of strings, optional\n A list of titles corresponding to the models, used to\n prefix the column(s) for that model. E.g. `\"Target\"`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display eigenvalue error intervals for the\n *final* Model in `models`.\n\n display : tuple/list of {\"matrices\",\"eigenvalues\",\"barplot\",\"boxplot\"}\n Which columns to display: the Choi matrices (as numerical grids),\n the Choi matrix eigenvalues (as a numerical list), the eigenvalues\n on a bar plot, and/or the matrix as a plot of colored boxes.\n \"\"\"\n\n def __init__(self, ws, models, titles=None,\n confidence_region_info=None,\n display=(\"matrix\", \"eigenvalues\", \"barplot\")):\n \"\"\"\n Create a table of the Choi matrices and/or their eigenvalues of\n a model's gates.\n\n Parameters\n ----------\n models : Model or list of Models\n The Model(s) whose Choi info should be displayed. If multiple\n Models are given, they should have the same operation labels.\n\n titles : list of strings, optional\n A list of titles corresponding to the models, used to\n prefix the column(s) for that model. E.g. 
`\"Target\"`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display eigenvalue error intervals for the\n *final* Model in `models`.\n\n display : tuple/list of {\"matrices\",\"eigenvalues\",\"barplot\",\"boxplot\"}\n Which columns to display: the Choi matrices (as numerical grids),\n the Choi matrix eigenvalues (as a numerical list), the eigenvalues\n on a bar plot, and/or the matrix as a plot of colored boxes.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(ChoiTable, self).__init__(ws, self._create, models, titles,\n confidence_region_info, display)\n\n def _create(self, models, titles, confidence_region_info, display):\n if isinstance(models, _models.Model):\n models = [models]\n\n opLabels = models[0].primitive_op_labels # use labels of 1st model\n assert(isinstance(models[0], _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n\n if titles is None:\n titles = [''] * len(models)\n\n qtysList = []\n for model in models:\n opLabels = model.primitive_op_labels # operation labels\n #qtys_to_compute = []\n if 'matrix' in display or 'boxplot' in display:\n choiMxs = [_ev(_reportables.Choi_matrix(model, gl)) for gl in opLabels]\n else:\n choiMxs = None\n if 'eigenvalues' in display or 'barplot' in display:\n evals = [_ev(_reportables.Choi_evals(model, gl), confidence_region_info) for gl in opLabels]\n else:\n evals = None\n qtysList.append((choiMxs, evals))\n colHeadings = ['Gate']\n for disp in display:\n if disp == \"matrix\":\n for model, title in zip(models, titles):\n basisLongNm = _tools.basis_longname(model.basis)\n pre = (title + ' ' if title else '')\n colHeadings.append('%sChoi matrix (%s basis)' % (pre, basisLongNm))\n elif disp == \"eigenvalues\":\n for model, title in zip(models, titles):\n pre = (title + ' ' if title else '')\n colHeadings.append('%sEigenvalues' % pre)\n elif disp == \"barplot\":\n for model, title in zip(models, titles):\n pre = (title + ' ' if title else '')\n colHeadings.append('%sEigenvalue Magnitudes' % pre)\n elif disp == \"boxplot\":\n for model, title in zip(models, titles):\n basisLongNm = _tools.basis_longname(model.basis)\n pre = (title + ' ' if title else '')\n colHeadings.append('%sChoi matrix (%s basis)' % (pre, basisLongNm))\n else:\n raise ValueError(\"Invalid element of `display`: %s\" % disp)\n formatters = [None] * len(colHeadings)\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n for i, gl in enumerate(opLabels):\n #Note: currently, we don't use confidence region...\n row_data = [gl]\n row_formatters = [None]\n\n for disp in display:\n if disp == \"matrix\":\n for model, (choiMxs, _) in zip(models, qtysList):\n row_data.append(choiMxs[i])\n row_formatters.append('Brackets')\n\n elif disp == \"eigenvalues\":\n for model, (_, evals) in zip(models, qtysList):\n try:\n evals[i] = evals[i].reshape(evals[i].size // 4, 4)\n #assumes len(evals) is multiple of 4!\n except: # if it isn't try 3 (qutrits)\n evals[i] = evals[i].reshape(evals[i].size // 3, 3)\n #assumes len(evals) is multiple of 3!\n row_data.append(evals[i])\n row_formatters.append('Normal')\n\n elif disp == \"barplot\":\n for model, (_, evals) in zip(models, qtysList):\n evs, evsEB = evals[i].value_and_errorbar\n fig = _wp.ChoiEigenvalueBarPlot(self.ws, evs, evsEB)\n row_data.append(fig)\n row_formatters.append('Figure')\n\n elif disp == \"boxplot\":\n for model, (choiMxs, _) in zip(models, qtysList):\n choiMx_real = 
choiMxs[i].hermitian_to_real()\n choiMx, EB = choiMx_real.value_and_errorbar\n fig = _wp.GateMatrixPlot(self.ws, choiMx,\n colorbar=False,\n mx_basis=model.basis,\n eb_matrix=EB)\n row_data.append(fig)\n row_formatters.append('Figure')\n\n table.add_row(row_data, row_formatters)\n table.finish()\n return table\n\n\nclass GaugeRobustModelTable(WorkspaceTable):\n \"\"\"\n Create a table showing a model in a gauge-robust representation.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The Model to display.\n\n target_model : Model\n The (usually ideal) reference model to compute gauge-invariant\n quantities with respect to.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the operation matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, display_as=\"boxes\",\n confidence_region_info=None):\n \"\"\"\n Create a table showing a gauge-invariant representation of a model.\n\n Parameters\n ----------\n model : Model\n The Model to display.\n\n target_model : Model\n The (usually ideal) reference model to compute gauge-invariant\n quantities with respect to.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the operation matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored\n boxes (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GaugeRobustModelTable, self).__init__(ws, self._create, model, target_model,\n display_as, confidence_region_info)\n\n def _create(self, model, target_model, display_as, confidence_region_info):\n\n assert(isinstance(model, _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n opLabels = model.primitive_op_labels # use labels of 1st model\n\n colHeadings = ['Gate', 'M - I'] + ['FinvF(%s) - I' % str(lbl) for lbl in opLabels]\n formatters = [None] * len(colHeadings)\n confidence_region_info = None # Don't deal with CIs yet...\n\n def _get_gig_decomp(mx, tmx): # \"Gauge invariant gateset\" decomposition\n G0, G = tmx, mx\n #ev0, U0 = _tools.sorted_eig(G0)\n #ev, U = _tools.sorted_eig(G)\n #U0inv = _np.linalg.inv(U0)\n #Uinv = _np.linalg.inv(U)\n\n _, U, U0, ev0 = _tools.compute_best_case_gauge_transform(G, G0, return_all=True)\n U0inv = _np.linalg.inv(U0)\n Uinv = _np.linalg.inv(U)\n kite = _tools.compute_kite(ev0)\n\n F = _tools.find_zero_communtant_connection(U, Uinv, U0, U0inv, kite) # Uinv * F * U0 is block diag\n Finv = _np.linalg.inv(F)\n # if G0 = U0 * E0 * U0inv then\n # Uinv * F * G0 * Finv * U = D * E0 * Dinv = E0 b/c D is block diagonal w/E0's degenercies\n # so F * G0 * Finv = U * E0 * Uinv = Gp ==> Finv * G * F = M * G0\n M = _np.dot(Finv, _np.dot(G, _np.dot(F, _np.linalg.inv(G0))))\n assert(_np.linalg.norm(M.imag) < 1e-8)\n\n M0 = _np.dot(U0inv, _np.dot(M, U0)) # M in G0's eigenbasis\n assert(_np.linalg.norm(_tools.project_onto_antikite(M0, kite)) < 1e-8) # should be block diagonal\n assert(_np.allclose(G, _np.dot(F, _np.dot(M, _np.dot(G0, Finv))))) # this is desired decomp\n assert(_np.linalg.norm(M.imag) < 1e-6 and _np.linalg.norm(F.imag) 
< 1e-6) # and everthing should be real\n return F, M, Finv\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n I = _np.identity(model.dim, 'd')\n\n M = 0.0 # max abs for colorscale\n op_decomps = {}\n for gl in opLabels:\n try:\n op_decomps[gl] = _get_gig_decomp(model.operations[gl].to_dense(on_space='HilbertSchmidt'),\n target_model.operations[gl].to_dense(on_space='HilbertSchmidt'))\n M = max(M, max(_np.abs((op_decomps[gl][1] - I).flat))) # update max\n except Exception as e:\n _warnings.warn(\"Failed gauge-robust decomposition of %s op:\\n%s\" % (gl, str(e)))\n\n for i, lbl in enumerate(opLabels):\n if lbl not in op_decomps: continue\n for j, lbl2 in enumerate(opLabels):\n if lbl2 not in op_decomps: continue\n if i == j: continue\n val = _np.dot(op_decomps[lbl][2], op_decomps[lbl2][0]) - I # value plotted below\n M = max(M, max(_np.abs(val).flat)) # update max\n\n #FUTURE: instruments too?\n for i, lbl in enumerate(opLabels):\n row_data = [lbl]\n row_formatters = [None]\n if lbl in op_decomps:\n Fi, Mi, Finvi = op_decomps[lbl]\n\n #Print \"M\" matrix\n if display_as == \"numbers\":\n row_data.append(Mi - I)\n row_formatters.append('Brackets')\n elif display_as == \"boxes\":\n fig = _wp.GateMatrixPlot(self.ws, Mi - I, -M, M, colorbar=False)\n row_data.append(fig)\n row_formatters.append('Figure')\n else:\n raise ValueError(\"Invalid 'display_as' argument: %s\" % display_as)\n else:\n row_data.append(_ReportableQty(_np.nan))\n row_formatters.append('Normal')\n\n for j, lbl2 in enumerate(opLabels):\n if i == j:\n row_data.append(\"0\")\n row_formatters.append(None)\n elif (lbl in op_decomps and lbl2 in op_decomps):\n val = _np.dot(Finvi, op_decomps[lbl2][0])\n\n #Print \"Finv*F\" matrix\n if display_as == \"numbers\":\n row_data.append(val - I)\n row_formatters.append('Brackets')\n elif display_as == \"boxes\":\n fig = _wp.GateMatrixPlot(self.ws, val - I, -M, M, colorbar=False)\n row_data.append(fig)\n row_formatters.append('Figure')\n else:\n raise ValueError(\"Invalid 'display_as' argument: %s\" % display_as)\n else:\n row_data.append(_ReportableQty(_np.nan))\n row_formatters.append('Normal')\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass GaugeRobustMetricTable(WorkspaceTable):\n \"\"\"\n Create a table showing a standard metric in a gauge-robust way.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The Model to display.\n\n target_model : Model\n The (usually ideal) reference model to compute gauge-invariant\n quantities with respect to.\n\n metric : str\n The abbreviation for the metric to use. 
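# A minimal construction sketch for the two gauge-robust tables defined in this module.  The names
# `ws` (a pygsti.report.Workspace), `mdl` (an estimated ExplicitOpModel) and `target_mdl` (its ideal
# counterpart) are assumed to exist; the arguments simply mirror the __init__ signatures documented
# here, with "inf" selecting entanglement infidelity.
def _example_gauge_robust_tables(ws, mdl, target_mdl):
    model_tbl = GaugeRobustModelTable(ws, mdl, target_mdl, display_as="boxes")
    metric_tbl = GaugeRobustMetricTable(ws, mdl, target_mdl, "inf")
    return model_tbl, metric_tbl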
Allowed values are:\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, metric,\n confidence_region_info=None):\n \"\"\"\n Create a table showing a standard metric in a gauge-robust way.\n\n Parameters\n ----------\n model : Model\n The Model to display.\n\n target_model : Model\n The (usually ideal) reference model to compute gauge-invariant\n quantities with respect to.\n\n metric : str\n The abbreviation for the metric to use. Allowed values are:\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GaugeRobustMetricTable, self).__init__(ws, self._create, model, target_model,\n metric, confidence_region_info)\n\n def _create(self, model, target_model, metric, confidence_region_info):\n\n assert(isinstance(model, _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n opLabels = model.primitive_op_labels\n\n colHeadings = [''] + ['%s' % str(lbl) for lbl in opLabels]\n formatters = [None] * len(colHeadings)\n confidence_region_info = None # Don't deal with CIs yet...\n\n # Table will essentially be a matrix whose diagonal elements are\n # --> metric(GateA_in_As_best_gauge, TargetA)\n # where a \"best gauge\" of a gate is one where it is co-diagonal with its target (same evecs can diag both).\n # Off-diagonal elements are given by:\n # --> min( metric(TargetA_in_Bs_best_gauge, TargetA), metric(TargetB_in_As_best_gauge, TargetB) )\n #\n # Thus, the diagonal elements tell us how much worse a (target) gate gets when just it's eigenvalues are\n # replaced with those of the actual estimated gate, and the off-diagonal elements tell us the least amount of\n # damage that must be done to a pair of (target) gates when just changing their eigenvectors to be consistent\n # with the actual estimated gates.\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n orig_model = model.copy()\n orig_model.set_all_parameterizations(\"full\") # so we can freely gauge transform 
this\n orig_target = target_model.copy()\n orig_target.set_all_parameterizations(\"full\") # so we can freely gauge transform this\n\n # ** A first attempt at fixing the gauge optimization issues. ** -- \"frobeniustt\" should replace this.\n #if metric in (\"inf\", \"agi\", \"nuinf\", \"nuagi\", \"evinf\", \"evagi\", \"evnuinf\", \"evnuagi\"):\n # gmetric = \"fidelity\"\n #elif metric in (\"trace\", \"diamond\", \"evdiamond\", \"evnudiamond\"):\n # gmetric = \"tracedist\"\n #else:\n # gmetric = \"frobenius\"\n gmetric = \"frobeniustt\"\n\n mdl_in_best_gauge = []\n target_mdl_in_best_gauge = []\n for lbl in opLabels:\n gate_mx = orig_model.operations[lbl].to_dense(on_space='HilbertSchmidt')\n target_gate_mx = target_model.operations[lbl].to_dense(on_space='HilbertSchmidt')\n Ugauge = _tools.compute_best_case_gauge_transform(gate_mx, target_gate_mx)\n Ugg = _models.gaugegroup.FullGaugeGroupElement(_np.linalg.inv(Ugauge))\n # transforms gates as Ugauge * gate * Ugauge_inv\n\n mdl = orig_model.copy()\n mdl.transform_inplace(Ugg)\n\n #DEBUG statements for trying to figure out why we get negative off-diagonals so often.\n #print(\"----- \",lbl,\"--------\")\n #print(\"PT1:\\n\",mdl.strdiff(target_model))\n #print(\"PT1b:\\n\",mdl.strdiff(target_model, 'inf'))\n try:\n _, Ugg_addl, mdl = _gopt.gaugeopt_to_target(mdl, orig_target, gates_metric=gmetric, spam_metric=gmetric,\n item_weights={'spam': 0, 'gates': 1e-4, lbl: 1.0},\n return_all=True, tol=1e-5, maxiter=100) # ADDITIONAL GOPT\n except Exception as e:\n _warnings.warn((\"GaugeRobustMetricTable gauge opt failed for %s label - \"\n \"falling back to frobenius metric! Error was:\\n%s\") % (lbl, str(e)))\n _, Ugg_addl, mdl = _gopt.gaugeopt_to_target(mdl, orig_target, gates_metric=\"frobenius\",\n spam_metric=\"frobenius\",\n item_weights={'spam': 0, 'gates': 1e-4, lbl: 1.0},\n return_all=True, tol=1e-5, maxiter=100) # ADDITIONAL GOPT\n\n #print(\"PT2:\\n\",mdl.strdiff(target_model))\n #print(\"PT2b:\\n\",mdl.strdiff(target_model, 'inf'))\n mdl_in_best_gauge.append(mdl)\n\n target_mdl = orig_target.copy()\n target_mdl.transform_inplace(Ugg)\n target_mdl.transform_inplace(Ugg_addl) # ADDITIONAL GOPT\n target_mdl_in_best_gauge.append(target_mdl)\n\n #FUTURE: instruments too?\n for i, lbl in enumerate(opLabels):\n row_data = [lbl]\n row_formatters = [None]\n\n for j, lbl2 in enumerate(opLabels):\n if i > j: # leave lower diagonal blank\n el = _ReportableQty(_np.nan)\n elif i == j: # diagonal element\n try:\n el = _reportables.evaluate_opfn_by_name(\n metric, mdl_in_best_gauge[i], target_model, lbl, confidence_region_info)\n except Exception:\n _warnings.warn(\"Error computing %s for %s op in gauge-robust metrics table!\" % (metric, lbl))\n el = _ReportableQty(_np.nan)\n else: # off-diagonal element\n try:\n el1 = _reportables.evaluate_opfn_by_name(\n metric, target_mdl_in_best_gauge[i], target_mdl_in_best_gauge[j], lbl2,\n confidence_region_info)\n el2 = _reportables.evaluate_opfn_by_name(\n metric, target_mdl_in_best_gauge[i], target_mdl_in_best_gauge[j], lbl,\n confidence_region_info)\n el = _rqty_minimum(el1, el2)\n except Exception:\n _warnings.warn(\"Error computing %s for %s,%s ops in gauge-robust metrics table!\" %\n (metric, lbl, lbl2))\n el = _ReportableQty(_np.nan)\n\n row_data.append(el)\n row_formatters.append('Normal')\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass ModelVsTargetTable(WorkspaceTable):\n \"\"\"\n Table comparing a Model (as a whole) to a target\n\n Parameters\n ----------\n ws : 
Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare with `target_model`.\n\n target_model : Model\n The target model to compare with.\n\n clifford_compilation : dict\n A dictionary of circuits, one for each Clifford operation\n in the Clifford group relevant to the model Hilbert space. If\n None, then rows requiring a clifford compilation are omitted.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, clifford_compilation, confidence_region_info=None):\n \"\"\"\n Create a table comparing a model (as a whole) to a target model\n using metrics that can be evaluatd for an entire model.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare\n\n clifford_compilation : dict\n A dictionary of circuits, one for each Clifford operation\n in the Clifford group relevant to the model Hilbert space. If\n None, then rows requiring a clifford compilation are omitted.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(ModelVsTargetTable, self).__init__(ws, self._create, model,\n target_model, clifford_compilation,\n confidence_region_info)\n\n def _create(self, model, target_model, clifford_compilation, confidence_region_info):\n\n colHeadings = ('Metric', \"Value\")\n formatters = (None, None)\n\n tooltips = colHeadings\n table = _ReportTable(colHeadings, formatters, col_heading_labels=tooltips,\n confidence_region_info=confidence_region_info)\n\n #Leave this off for now, as it's primary use is to compare with RB and the predicted RB number is better\n #for this.\n #pAGsI = _ev(_reportables.Average_gateset_infidelity(model, target_model), confidence_region_info)\n #table.add_row((\"Avg. primitive model infidelity\", pAGsI), (None, 'Normal') )\n\n pRBnum = _ev(_reportables.Predicted_rb_number(model, target_model), confidence_region_info)\n table.add_row((\"Predicted primitive RB number\", pRBnum), (None, 'Normal'))\n\n from pygsti.forwardsims import MatrixForwardSimulator as _MatrixFSim\n if clifford_compilation and isinstance(model.sim, _MatrixFSim):\n clifford_model = _models.create_explicit_alias_model(model, clifford_compilation)\n clifford_targetModel = _models.create_explicit_alias_model(target_model, clifford_compilation)\n\n ##For clifford versions we don't have a confidence region - so no error bars\n #AGsI = _ev(_reportables.Average_gateset_infidelity(clifford_model, clifford_targetModel))\n #table.add_row((\"Avg. 
clifford model infidelity\", AGsI), (None, 'Normal') )\n\n RBnum = _ev(_reportables.Predicted_rb_number(clifford_model, clifford_targetModel))\n table.add_row((\"Predicted Clifford RB number\", RBnum), (None, 'Normal'))\n\n table.finish()\n return table\n\n\nclass GatesVsTargetTable(WorkspaceTable):\n \"\"\"\n Table comparing a Model's gates to those of a target model\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare to `target_model`.\n\n target_model : model\n The model to compare with.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple, optional\n A tuple of one or more of the allowed options (see below) which\n specify which columns are displayed in the table.\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n - \"unmodeled\" : unmodeled \"wildcard\" budget\n\n virtual_ops : list, optional\n If not None, a list of `Circuit` objects specifying additional \"gates\"\n (i.e. processes) to compute eigenvalues of. Length-1 circuits are\n automatically discarded so they are not displayed twice.\n\n wildcard: PrimitiveOpsWildcardBudget\n A wildcard budget with a `budget_for` method that is used to\n fill in the \"unmodeled\" error column when it is requested.\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None,\n display=('inf', 'agi', 'trace', 'diamond', 'nuinf', 'nuagi'),\n virtual_ops=None, wildcard=None):\n \"\"\"\n Create a table comparing a model's gates to a target model using\n metrics such as the infidelity, diamond-norm distance, and trace distance.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple, optional\n A tuple of one or more of the allowed options (see below) which\n specify which columns are displayed in the table.\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n - \"unmodeled\" : unmodeled \"wildcard\" budget\n\n virtual_ops : list, optional\n If not None, a list of `Circuit` objects specifying additional \"gates\"\n (i.e. processes) to compute eigenvalues of. 
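# A minimal sketch of supplying virtual operations (e.g. germ circuits) to GatesVsTargetTable.  The
# names `ws`, `mdl`, `target_mdl` and `germ_circuits` (a list of Circuit objects) are assumed to
# exist; per the `virtual_ops` description in this docstring, circuits of length > 1 appear as extra
# rows whose eigenvalue-based metrics are computed for the whole circuit.
def _example_gates_vs_target_with_germs(ws, mdl, target_mdl, germ_circuits):
    return GatesVsTargetTable(ws, mdl, target_mdl,
                              display=('evinf', 'evdiamond'),
                              virtual_ops=germ_circuits)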
Length-1 circuits are\n automatically discarded so they are not displayed twice.\n\n wildcard: PrimitiveOpsWildcardBudget\n A wildcard budget with a `budget_for` method that is used to\n fill in the \"unmodeled\" error column when it is requested.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GatesVsTargetTable, self).__init__(ws, self._create, model,\n target_model, confidence_region_info,\n display, virtual_ops, wildcard)\n\n def _create(self, model, target_model, confidence_region_info,\n display, virtual_ops, wildcard):\n\n opLabels = model.primitive_op_labels # operation labels\n instLabels = list(model.instruments.keys()) # requires an explicit model!\n assert(isinstance(model, _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n\n colHeadings = ['Gate'] if (virtual_ops is None) else ['Gate or Germ']\n tooltips = ['Gate'] if (virtual_ops is None) else ['Gate or Germ']\n for disp in display:\n if disp == \"unmodeled\" and not wildcard: continue # skip wildcard column if there is no wilcard info\n try:\n heading, tooltip = _reportables.info_of_opfn_by_name(disp)\n except ValueError:\n raise ValueError(\"Invalid display column name: %s\" % disp)\n colHeadings.append(heading)\n tooltips.append(tooltip)\n\n formatters = (None,) + ('Conversion',) * (len(colHeadings) - 1)\n\n table = _ReportTable(colHeadings, formatters, col_heading_labels=tooltips,\n confidence_region_info=confidence_region_info)\n\n formatters = (None,) + ('Normal',) * (len(colHeadings) - 1)\n\n if virtual_ops is None:\n iterOver = opLabels\n else:\n iterOver = opLabels + tuple((v for v in virtual_ops if len(v) > 1))\n\n for gl in iterOver:\n #Note: gl may be a operation label (a string) or a Circuit\n row_data = [str(gl)]\n\n for disp in display:\n if disp == \"unmodeled\": # a special case for now\n if wildcard:\n row_data.append(_ReportableQty(\n wildcard.budget_for(gl)))\n continue # Note: don't append anything if 'not wildcard'\n\n #import time as _time #DEBUG\n #tStart = _time.time() #DEBUG\n if target_model is None:\n qty = _ReportableQty(_np.nan)\n else:\n qty = _reportables.evaluate_opfn_by_name(\n disp, model, target_model, gl, confidence_region_info)\n #tm = _time.time()-tStart #DEBUG\n #if tm > 0.01: print(\"DB: Evaluated %s in %gs\" % (disp, tm)) #DEBUG\n row_data.append(qty)\n\n table.add_row(row_data, formatters)\n\n #Iterate over instruments\n for il in instLabels:\n row_data = [str(il)]\n\n #Note: could move this to a reportables function in future for easier\n # confidence region support - for now, no CI support:\n for disp in display:\n if disp == \"unmodeled\": # a special case for now\n if wildcard:\n row_data.append(_ReportableQty(wildcard.budget_for(il)))\n continue # Note: don't append anything if 'not wildcard'\n\n if target_model is None:\n qty = _ReportableQty(_np.nan)\n else:\n qty = _reportables.evaluate_instrumentfn_by_name(\n disp, model, target_model, il, confidence_region_info)\n #tm = _time.time()-tStart #DEBUG\n #if tm > 0.01: print(\"DB: Evaluated %s in %gs\" % (disp, tm)) #DEBUG\n row_data.append(qty)\n\n table.add_row(row_data, formatters)\n\n table.finish()\n return table\n\n\nclass SpamVsTargetTable(WorkspaceTable):\n \"\"\"\n Table comparing a Model's SPAM vectors to those of a target\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare to `target_model`.\n\n target_model : model\n The model to compare with.\n\n confidence_region_info : ConfidenceRegion, optional\n If not 
None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None):\n \"\"\"\n Create a table comparing a model's SPAM operations to a target model\n using state infidelity and trace distance.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(SpamVsTargetTable, self).__init__(ws, self._create, model,\n target_model, confidence_region_info)\n\n def _create(self, model, target_model, confidence_region_info):\n\n prepLabels = list(model.preps.keys())\n povmLabels = list(model.povms.keys())\n\n colHeadings = ('Prep/POVM', \"Infidelity\", \"1/2 Trace|Distance\", \"1/2 Diamond-Dist\")\n formatters = (None, 'Conversion', 'Conversion', 'Conversion')\n tooltips = ('', 'State infidelity or entanglement infidelity of POVM map',\n 'Trace distance between states (preps) or Jamiolkowski states of POVM maps',\n 'Half-diamond-norm distance between POVM maps')\n table = _ReportTable(colHeadings, formatters, col_heading_labels=tooltips,\n confidence_region_info=confidence_region_info)\n\n formatters = ['Rho'] + ['Normal'] * (len(colHeadings) - 1)\n prepInfidelities = [_ev(_reportables.Vec_infidelity(model, target_model, l,\n 'prep'), confidence_region_info)\n for l in prepLabels]\n prepTraceDists = [_ev(_reportables.Vec_tr_diff(model, target_model, l,\n 'prep'), confidence_region_info)\n for l in prepLabels]\n prepDiamondDists = [_ReportableQty(_np.nan)] * len(prepLabels)\n for rowData in zip(prepLabels, prepInfidelities, prepTraceDists,\n prepDiamondDists):\n table.add_row(rowData, formatters)\n\n formatters = ['Normal'] + ['Normal'] * (len(colHeadings) - 1)\n povmInfidelities = [_ev(_reportables.POVM_entanglement_infidelity(\n model, target_model, l), confidence_region_info)\n for l in povmLabels]\n povmTraceDists = [_ev(_reportables.POVM_jt_diff(\n model, target_model, l), confidence_region_info)\n for l in povmLabels]\n povmDiamondDists = [_ev(_reportables.POVM_half_diamond_norm(\n model, target_model, l), confidence_region_info)\n for l in povmLabels]\n\n for rowData in zip(povmLabels, povmInfidelities, povmTraceDists,\n povmDiamondDists):\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass ErrgenTable(WorkspaceTable):\n \"\"\"\n Table displaying the error generators of a Model's gates and their projections.\n\n Projections are given onto spaces of standard generators.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare to `target_model`.\n\n target_model : model\n The model to compare with.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple of {\"errgen\",\"H\",\"S\",\"A\"}\n Specifes which columns to include: the error generator itself\n and the projections of the generator onto Hamiltoian-type error\n (generators), Stochastic-type errors, and Affine-type errors.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the requested matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored boxes\n (space-conserving and better for large matrices).\n\n gen_type : {\"logG-logT\", \"logTiG\", \"logGTi\"}\n The type of error generator to compute. 
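# A standalone numerical sketch of the "logGTi" error-generator convention listed below
# (errgen = log(gate @ inv(target_op))), using plain numpy/scipy on dense superoperator matrices.
# This mirrors the documented formula only; pyGSTi's own reportables handle basis choices and
# matrix-log branch issues more carefully.
import numpy as np
from scipy.linalg import logm

def _example_loggti_errorgen(gate_mx, target_mx):
    """Return log(G T^-1) for dense superoperators G (estimate) and T (target)."""
    return logm(np.dot(gate_mx, np.linalg.inv(target_mx)))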
Allowed values are:\n\n - \"logG-logT\" : errgen = log(gate) - log(target_op)\n - \"logTiG\" : errgen = log( dot(inv(target_op), gate) )\n - \"logTiG\" : errgen = log( dot(gate, inv(target_op)) )\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None,\n display=(\"errgen\", \"H\", \"S\", \"A\"), display_as=\"boxes\",\n gen_type=\"logGTi\"):\n \"\"\"\n Create a table listing the error generators obtained by\n comparing a model's gates to a target model.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare\n\n display : tuple of {\"errgen\",\"H\",\"S\",\"A\"}\n Specifes which columns to include: the error generator itself\n and the projections of the generator onto Hamiltoian-type error\n (generators), Stochastic-type errors, and Affine-type errors.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the requested matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored boxes\n (space-conserving and better for large matrices).\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n gen_type : {\"logG-logT\", \"logTiG\", \"logGTi\"}\n The type of error generator to compute. Allowed values are:\n\n - \"logG-logT\" : errgen = log(gate) - log(target_op)\n - \"logTiG\" : errgen = log( dot(inv(target_op), gate) )\n - \"logTiG\" : errgen = log( dot(gate, inv(target_op)) )\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(ErrgenTable, self).__init__(ws, self._create, model,\n target_model, confidence_region_info,\n display, display_as, gen_type)\n\n def _create(self, model, target_model,\n confidence_region_info, display, display_as, gen_type):\n\n opLabels = model.primitive_op_labels # operation labels\n basis = model.basis\n basisPrefix = \"\"\n if basis.name == \"pp\": basisPrefix = \"Pauli \"\n elif basis.name == \"qt\": basisPrefix = \"Qutrit \"\n elif basis.name == \"gm\": basisPrefix = \"GM \"\n elif basis.name == \"std\": basisPrefix = \"Mx unit \"\n\n colHeadings = ['Gate']\n\n for disp in display:\n if disp == \"errgen\":\n colHeadings.append('Error Generator')\n elif disp == \"H\":\n colHeadings.append('%sHamiltonian Projections' % basisPrefix)\n elif disp == \"S\":\n colHeadings.append('%sStochastic Projections' % basisPrefix)\n elif disp == \"A\":\n colHeadings.append('%sAffine Projections' % basisPrefix)\n else: raise ValueError(\"Invalid display element: %s\" % disp)\n\n assert(display_as == \"boxes\" or display_as == \"numbers\")\n table = _ReportTable(colHeadings, (None,) * len(colHeadings),\n confidence_region_info=confidence_region_info)\n\n errgenAndProjs = {}\n errgensM = []\n hamProjsM = []\n stoProjsM = []\n affProjsM = []\n\n def _get_min_max(max_lst, m):\n \"\"\"return a [min,max] already in list if there's one within an\n order of magnitude\"\"\"\n m = max(m, ABS_THRESHOLD)\n for mx in max_lst:\n if (abs(m) >= 1e-6 and 0.9999 < mx / m < 10) or (abs(mx) < 1e-6 and abs(m) < 1e-6):\n return -mx, mx\n return None\n\n ABS_THRESHOLD = 1e-6 # don't let color scales run from 0 to 0: at least this much!\n\n def add_max(max_lst, m):\n \"\"\"add `m` to a list of maximas if it's different enough from\n existing elements\"\"\"\n m = max(m, ABS_THRESHOLD)\n if not _get_min_max(max_lst, m):\n max_lst.append(m)\n\n #Do computation, so shared color scales can be computed\n for gl in opLabels:\n if gen_type == \"logG-logT\":\n info = _ev(_reportables.LogGmlogT_and_projections(\n model, target_model, 
gl), confidence_region_info)\n elif gen_type == \"logTiG\":\n info = _ev(_reportables.LogTiG_and_projections(\n model, target_model, gl), confidence_region_info)\n elif gen_type == \"logGTi\":\n info = _ev(_reportables.LogGTi_and_projections(\n model, target_model, gl), confidence_region_info)\n else: raise ValueError(\"Invalid generator type: %s\" % gen_type)\n errgenAndProjs[gl] = info\n\n errgen = info['error generator'].value\n absMax = _np.max(_np.abs(errgen))\n add_max(errgensM, absMax)\n\n if \"H\" in display:\n absMax = _np.max(_np.abs(info['hamiltonian projections'].value))\n add_max(hamProjsM, absMax)\n\n if \"S\" in display:\n absMax = _np.max(_np.abs(info['stochastic projections'].value))\n add_max(stoProjsM, absMax)\n\n if \"A\" in display:\n absMax = _np.max(_np.abs(info['affine projections'].value))\n add_max(affProjsM, absMax)\n\n #Do plotting\n for gl in opLabels:\n row_data = [gl]\n row_formatters = [None]\n info = errgenAndProjs[gl]\n\n for disp in display:\n if disp == \"errgen\":\n if display_as == \"boxes\":\n errgen, EB = info['error generator'].value_and_errorbar\n m, M = _get_min_max(errgensM, _np.max(_np.abs(errgen)))\n errgen_fig = _wp.GateMatrixPlot(self.ws, errgen, m, M,\n basis, eb_matrix=EB)\n row_data.append(errgen_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(info['error generator'])\n row_formatters.append('Brackets')\n\n elif disp == \"H\":\n if display_as == \"boxes\":\n T = \"Power %.2g\" % info['hamiltonian projection power'].value\n hamProjs, EB = info['hamiltonian projections'].value_and_errorbar\n m, M = _get_min_max(hamProjsM, _np.max(_np.abs(hamProjs)))\n hamdecomp_fig = _wp.ProjectionsBoxPlot(\n self.ws, hamProjs, basis, m, M,\n box_labels=True, eb_matrix=EB, title=T)\n row_data.append(hamdecomp_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(info['hamiltonian projections'])\n row_formatters.append('Brackets')\n\n elif disp == \"S\":\n if display_as == \"boxes\":\n T = \"Power %.2g\" % info['stochastic projection power'].value\n stoProjs, EB = info['stochastic projections'].value_and_errorbar\n m, M = _get_min_max(stoProjsM, _np.max(_np.abs(stoProjs)))\n stodecomp_fig = _wp.ProjectionsBoxPlot(\n self.ws, stoProjs, basis, m, M,\n box_labels=True, eb_matrix=EB, title=T)\n row_data.append(stodecomp_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(info['stochastic projections'])\n row_formatters.append('Brackets')\n\n elif disp == \"A\":\n if display_as == \"boxes\":\n T = \"Power %.2g\" % info['affine projection power'].value\n affProjs, EB = info['affine projections'].value_and_errorbar\n m, M = _get_min_max(affProjsM, _np.max(_np.abs(affProjs)))\n affdecomp_fig = _wp.ProjectionsBoxPlot(\n self.ws, affProjs, basis, m, M,\n box_labels=True, eb_matrix=EB, title=T)\n row_data.append(affdecomp_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(info['affine projections'])\n row_formatters.append('Brackets')\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass GaugeRobustErrgenTable(WorkspaceTable):\n \"\"\"\n Table of gauge-robust error generators.\n\n A table displaying the first-order gauge invariant (\"gauge robust\")\n linear combinations of standard error generator coefficients for\n the gates in a model.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare to `target_model`.\n\n target_model : model\n The model to compare with.\n\n confidence_region_info : ConfidenceRegion, 
optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n gen_type : {\"logG-logT\", \"logTiG\", \"logGTi\"}\n The type of error generator to compute. Allowed values are:\n\n - \"logG-logT\" : errgen = log(gate) - log(target_op)\n - \"logTiG\" : errgen = log( dot(inv(target_op), gate) )\n - \"logTiG\" : errgen = log( dot(gate, inv(target_op)) )\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None,\n gen_type=\"logGTi\"):\n \"\"\"\n Create a table listing the first-order gauge invariant (\"gauge robust\")\n linear combinations of standard error generator coefficients for\n the gates in `model`. This table identifies, through the use of\n \"synthetic idle tomography\", which combinations of standard-error-\n generator coefficients are robust (to first-order) to gauge variations.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n gen_type : {\"logG-logT\", \"logTiG\", \"logGTi\"}\n The type of error generator to compute. Allowed values are:\n\n - \"logG-logT\" : errgen = log(gate) - log(target_op)\n - \"logTiG\" : errgen = log( dot(inv(target_op), gate) )\n - \"logTiG\" : errgen = log( dot(gate, inv(target_op)) )\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GaugeRobustErrgenTable, self).__init__(ws, self._create, model,\n target_model, confidence_region_info,\n gen_type)\n\n def _create(self, model, target_model, confidence_region_info, gen_type):\n assert(isinstance(model, _models.ExplicitOpModel)), \"%s only works with explicit models\" % str(type(self))\n\n colHeadings = ['Error rates', 'Value']\n\n table = _ReportTable(colHeadings, (None,) * len(colHeadings),\n confidence_region_info=confidence_region_info)\n\n assert(gen_type == \"logGTi\"), \"Only `gen_type == \\\"logGTI\\\"` is supported when `gaugeRobust` is True\"\n syntheticIdleStrs = []\n\n ## Construct synthetic idles\n maxPower = 4; maxLen = 6; Id = _np.identity(target_model.dim, 'd')\n baseStrs = _circuits.list_all_circuits_without_powers_and_cycles(list(model.operations.keys()), maxLen)\n for s in baseStrs:\n for i in range(1, maxPower):\n if len(s**i) > 1 and _np.linalg.norm(target_model.sim.product(s**i) - Id) < 1e-6:\n syntheticIdleStrs.append(s**i); break\n #syntheticIdleStrs = _circuits.to_circuits([ ('Gx',)*4, ('Gy',)*4 ] ) #DEBUG!!!\n #syntheticIdleStrs = _circuits.to_circuits([ ('Gx',)*4, ('Gy',)*4, ('Gy','Gx','Gx')*2] ) #DEBUG!!!\n print(\"Using synthetic idles: \\n\", '\\n'.join([str(opstr) for opstr in syntheticIdleStrs]))\n\n gaugeRobust_info = _ev(_reportables.Robust_LogGTi_and_projections(\n model, target_model, syntheticIdleStrs), confidence_region_info)\n\n for linear_combo_lbl, val in gaugeRobust_info.items():\n row_data = [linear_combo_lbl, val]\n row_formatters = [None, 'Normal']\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass NQubitErrgenTable(WorkspaceTable):\n \"\"\"\n Table displaying the error rates (coefficients of error generators) of a Model's gates.\n\n The gates are assumed to have a particular structure.\n\n Specifically, gates must be :class:`LindbladOp` or\n :class:`StaticArbitraryOp` objects wrapped within :class:`EmbeddedOp` and/or\n :class:`ComposedOp` objects (this is consistent with the operation\n blocks of a :class:`CloudNoiseModel`). 
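# A small sketch of the "synthetic idle" test used by GaugeRobustErrgenTable above (hypothetical
# helper): a base circuit raised to some power counts as a synthetic idle when the target model's
# product over the repeated circuit is numerically the identity superoperator.
import numpy as np

def _example_is_synthetic_idle(target_model, base_circuit, power, tol=1e-6):
    repeated = base_circuit ** power
    if len(repeated) <= 1:
        return False                              # length-1 circuits are not used as synthetic idles
    superop = target_model.sim.product(repeated)  # dense superoperator product over the circuit
    ident = np.identity(target_model.dim, 'd')
    return np.linalg.norm(superop - ident) < tol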
As such, error rates\n are read directly from the gate objects rather than being computed by\n projecting dense gate representations onto a \"basis\" of fixed error\n generators (e.g. H+S+A generators).\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to analyze.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple of {\"H\",\"S\",\"A\"}\n Specifes which columns to include: Hamiltoian-type,\n Pauli-Stochastic-type, and Affine-type rates, respectively.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the requested matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored boxes\n (space-conserving and better for large matrices).\n \"\"\"\n\n def __init__(self, ws, model, confidence_region_info=None,\n display=(\"H\", \"S\", \"A\"), display_as=\"boxes\"):\n \"\"\"\n Create a table listing the error rates of the gates in `model`.\n\n The gates in `model` are assumed to have a particular structure,\n namely: they must be :class:`LindbladOp` or\n :class:`StaticArbitraryOp` objects wrapped within :class:`EmbeddedOp`\n and/or :class:`ComposedOp` objects.\n\n Error rates are organized by order of composition and which qubits\n the corresponding error generators act upon.\n\n Parameters\n ----------\n model : Model\n The model to analyze.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple of {\"H\",\"S\",\"A\"}\n Specifes which columns to include: Hamiltoian-type,\n Pauli-Stochastic-type, and Affine-type rates, respectively.\n\n display_as : {\"numbers\", \"boxes\"}, optional\n How to display the requested matrices, as either numerical\n grids (fine for small matrices) or as a plot of colored boxes\n (space-conserving and better for large matrices).\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(NQubitErrgenTable, self).__init__(ws, self._create, model,\n confidence_region_info,\n display, display_as)\n\n def _create(self, model, confidence_region_info, display, display_as):\n opLabels = model.primitive_op_labels # operation labels\n\n #basis = model.basis\n #basisPrefix = \"\"\n #if basis.name == \"pp\": basisPrefix = \"Pauli \"\n #elif basis.name == \"qt\": basisPrefix = \"Qutrit \"\n #elif basis.name == \"gm\": basisPrefix = \"GM \"\n #elif basis.name == \"std\": basisPrefix = \"Mx unit \"\n\n colHeadings = ['Gate', 'Compos', 'SSLbls']\n\n for disp in display:\n #if disp == \"errgen\":\n # colHeadings.append('Error Generator')\n if disp == \"H\":\n colHeadings.append('Hamiltonian Coeffs')\n elif disp == \"S\":\n colHeadings.append('Stochastic Coeffs')\n elif disp == \"A\":\n colHeadings.append('Affine Coeffs')\n else: raise ValueError(\"Invalid display element: %s\" % disp)\n\n assert(display_as == \"boxes\" or display_as == \"numbers\")\n table = _ReportTable(colHeadings, (None,) * len(colHeadings),\n confidence_region_info=confidence_region_info)\n\n def _get_min_max(max_lst, m):\n \"\"\"return a [min,max] already in list if there's one within an\n order of magnitude\"\"\"\n m = max(m, ABS_THRESHOLD)\n for mx in max_lst:\n if (abs(m) >= 1e-6 and 0.9999 < mx / m < 10) or (abs(mx) < 1e-6 and abs(m) < 1e-6):\n return -mx, mx\n return None\n\n ABS_THRESHOLD = 1e-6 # don't let color scales run from 0 to 0: at least this much!\n\n def add_max(max_lst, m):\n \"\"\"add `m` to a 
list of maximas if it's different enough from\n existing elements\"\"\"\n m = max(m, ABS_THRESHOLD)\n if not _get_min_max(max_lst, m):\n max_lst.append(m)\n\n pre_rows = []; displayed_params = set()\n\n def process_gate(lbl, gate, comppos_prefix, sslbls):\n if isinstance(gate, _op.ComposedOp):\n for i, fgate in enumerate(gate.factorops):\n process_gate(lbl, fgate, comppos_prefix + (i,), sslbls)\n elif isinstance(gate, _op.EmbeddedOp):\n process_gate(lbl, gate.embedded_op, comppos_prefix, gate.targetLabels)\n elif isinstance(gate, _op.StaticArbitraryOp):\n pass # no error coefficients associated w/static gates\n elif isinstance(gate, _op.ComposedOp):\n\n # Only display coeffs for gates that correspond to *new*\n # (not yet displayed) parameters.\n params = set(gate.gpindices_as_array())\n if not params.issubset(displayed_params):\n displayed_params.update(params)\n\n Ldict, basis = gate.errorgen_coefficients(return_basis=True) # does this work w/ComposedOp?\n sparse = basis.sparse\n\n #Try to find good labels for these basis elements\n # (so far, just try to match with \"pp\" basis els)\n ref_basis = _baseobjs.BuiltinBasis(\"pp\", gate.dim, sparse=sparse)\n basisLbls = {}\n for bl1, mx in zip(basis.labels, basis.elements):\n for bl2, mx2 in zip(ref_basis.labels, ref_basis.elements):\n if (sparse and _tools.sparse_equal(mx, mx2)) or (not sparse and _np.allclose(mx, mx2)):\n basisLbls[bl1] = bl2; break\n else:\n basisLbls[bl1] = bl1\n\n pre_rows.append((lbl, comppos_prefix, sslbls, Ldict, basisLbls))\n else:\n raise ValueError(\"Unknown gate type for NQubitErrgenTable: %s\" % str(type(gate)))\n\n def _get_plot_info(lindblad_dict, basis_lbls, typ):\n # for now just make a 1D plot - can get fancy later...\n ylabels = [\"\"]\n xlabels = []\n coeffs = []\n for termInfo, coeff in lindblad_dict.items():\n termtyp = termInfo[0]\n if termtyp not in (\"H\", \"S\", \"A\"): raise ValueError(\"Unknown terminfo: \", termInfo)\n if (termtyp == \"H\" and typ == \"hamiltonian\") or \\\n (termtyp == \"S\" and typ == \"stochastic\") or \\\n (termtyp == \"A\" and typ == \"affine\"):\n assert(len(termInfo) == 2), \"Non-diagonal terms not suppoted (yet)!\"\n xlabels.append(basis_lbls[termInfo[1]])\n coeffs.append(coeff)\n return _np.array([coeffs]), xlabels, ylabels\n\n #Do computation, so shared color scales can be computed\n if isinstance(model, _models.ExplicitOpModel):\n for gl in opLabels:\n process_gate(gl, model.operations[gl], (), None)\n elif isinstance(model, _models.LocalNoiseModel): # process primitive op error\n for gl in opLabels:\n process_gate(gl, model.operation_blks['layers'][gl], (), None)\n elif isinstance(model, _models.CloudNoiseModel): # process primitive op error\n for gl in opLabels:\n process_gate(gl, model.operation_blks['cloudnoise'][gl], (), None)\n else:\n raise ValueError(\"Unrecognized type of model: %s\" % str(type(model)))\n\n #get min/max\n if len(pre_rows) > 0:\n M = max((max(map(abs, Ldict.values())) for _, _, _, Ldict, _ in pre_rows))\n m = -M\n else:\n M = m = 0\n\n #Now pre_rows is filled, so we just need to create the plots:\n for gl, comppos, sslbls, Ldict, basisLbls in pre_rows:\n row_data = [gl, str(comppos), str(sslbls)]\n row_formatters = [None, None, None]\n\n for disp in display:\n if disp == \"H\":\n hamCoeffs, xlabels, ylabels = _get_plot_info(Ldict, basisLbls, \"hamiltonian\")\n if display_as == \"boxes\":\n #m,M = _get_min_max(coeffsM,_np.max(_np.abs(hamCoeffs)))\n # May need to add EB code and/or title to MatrixPlot in FUTURE\n hamCoeffs_fig = _wp.MatrixPlot(\n 
self.ws, hamCoeffs, m, M, xlabels, ylabels,\n box_labels=True, prec=\"compacthp\")\n row_data.append(hamCoeffs_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(hamCoeffs)\n row_formatters.append('Brackets')\n\n if disp == \"S\":\n stoCoeffs, xlabels, ylabels = _get_plot_info(Ldict, basisLbls, \"stochastic\")\n if display_as == \"boxes\":\n #m,M = _get_min_max(coeffsM,_np.max(_np.abs(stoCoeffs)))\n # May need to add EB code and/or title to MatrixPlot in FUTURE\n stoCoeffs_fig = _wp.MatrixPlot(\n self.ws, stoCoeffs, m, M, xlabels, ylabels,\n box_labels=True, prec=\"compacthp\")\n row_data.append(stoCoeffs_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(stoCoeffs)\n row_formatters.append('Brackets')\n\n if disp == \"A\":\n affCoeffs, xlabels, ylabels = _get_plot_info(Ldict, basisLbls, \"affine\")\n if display_as == \"boxes\":\n #m,M = _get_min_max(coeffsM,_np.max(_np.abs(effCoeffs)))\n # May need to add EB code and/or title to MatrixPlot in FUTURE\n affCoeffs_fig = _wp.MatrixPlot(\n self.ws, affCoeffs, m, M, xlabels, ylabels,\n box_labels=True, prec=\"compacthp\")\n row_data.append(affCoeffs_fig)\n row_formatters.append('Figure')\n else:\n row_data.append(affCoeffs)\n row_formatters.append('Brackets')\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass OldRotationAxisVsTargetTable(WorkspaceTable):\n \"\"\"\n Old 1-qubit-only gate rotation axis table\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model to compare to `target_model`. Must be single qubit.\n\n target_model : model\n The model to compare with. Must be single qubit.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None):\n \"\"\"\n Create a table comparing the rotation axes of the single-qubit gates in\n `model` with those in `target_model`. Differences are shown as\n angles between the rotation axes of corresponding gates.\n\n Parameters\n ----------\n model, target_model : Model\n The models to compare. 
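# Aside (illustrative only): the quantity this table reports -- the angle
# between two gates' rotation axes -- is computed inside pyGSTi by the
# `Model_model_angles_btwn_axes` reportable.  A bare-bones version for two
# axis vectors is sketched below; the real reportable also handles sign and
# phase conventions and attaches error bars, which this toy version does not.
import numpy as _np

def _toy_axis_angle(axis1, axis2):
    """Angle (in radians) between two rotation-axis vectors."""
    a = _np.asarray(axis1, dtype=float)
    b = _np.asarray(axis2, dtype=float)
    cosang = _np.dot(a, b) / (_np.linalg.norm(a) * _np.linalg.norm(b))
    return _np.arccos(_np.clip(cosang, -1.0, 1.0))

# e.g. _toy_axis_angle([1, 0, 0], [0, 1, 0]) == pi/2  (an X axis vs. a Y axis);
# the table then renders such angles in units of pi via the 'Pi' formatter.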
Must be single-qubit.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(OldRotationAxisVsTargetTable, self).__init__(\n ws, self._create, model, target_model, confidence_region_info)\n\n def _create(self, model, target_model, confidence_region_info):\n\n opLabels = model.primitive_op_labels # operation labels\n\n colHeadings = ('Gate', \"Angle between|rotation axes\")\n formatters = (None, 'Conversion')\n\n anglesList = [_ev(_reportables.Model_model_angles_btwn_axes(\n model, target_model, gl), confidence_region_info) for gl in opLabels]\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n formatters = [None] + ['Pi']\n\n for gl, angle in zip(opLabels, anglesList):\n rowData = [gl] + [angle]\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass GateDecompTable(WorkspaceTable):\n \"\"\"\n Table of angle & axis decompositions of a Model's gates\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The estimated model.\n\n target_model : Model\n The target model, used to help disambiguate the matrix\n logarithms that are used in the decomposition.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, target_model, confidence_region_info=None):\n \"\"\"\n Create table for decomposing a model's gates.\n\n This table interprets the Hamiltonian projection of the log\n of the operation matrix to extract a rotation angle and axis.\n\n Parameters\n ----------\n model : Model\n The estimated model.\n\n target_model : Model\n The target model, used to help disambiguate the matrix\n logarithms that are used in the decomposition.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GateDecompTable, self).__init__(ws, self._create, model,\n target_model, confidence_region_info)\n\n def _create(self, model, target_model, confidence_region_info):\n opLabels = model.primitive_op_labels # operation labels\n\n colHeadings = ('Gate', 'Ham. Evals.', 'Rotn. angle', 'Rotn. axis', 'Log Error') \\\n + tuple([\"Axis angle w/%s\" % str(gl) for gl in opLabels])\n tooltips = (\n 'Gate', 'Hamiltonian Eigenvalues', 'Rotation angle', 'Rotation axis',\n 'Taking the log of a gate may be performed approximately. This is '\n 'error in that estimate, i.e. 
norm(G - exp(approxLogG)).'\n ) + tuple([\"Angle between the rotation axis of %s and the gate of the current row\"\n % str(gl) for gl in opLabels])\n formatters = [None] * len(colHeadings)\n\n table = _ReportTable(colHeadings, formatters,\n col_heading_labels=tooltips, confidence_region_info=confidence_region_info)\n formatters = (None, 'Pi', 'Pi', 'Figure', 'Normal') + ('Pi',) * len(opLabels)\n\n decomp = _ev(_reportables.General_decomposition(\n model, target_model), confidence_region_info)\n\n for gl in opLabels:\n gl = str(gl) # Label -> str for decomp-dict keys\n axis, axisEB = decomp[gl + ' axis'].value_and_errorbar\n axisFig = _wp.ProjectionsBoxPlot(self.ws, axis, model.basis, -1.0, 1.0,\n box_labels=True, eb_matrix=axisEB)\n decomp[gl + ' hamiltonian eigenvalues'].scale_inplace(1.0 / _np.pi) # scale evals to units of pi\n rowData = [gl, decomp[gl + ' hamiltonian eigenvalues'],\n decomp[gl + ' angle'], axisFig,\n decomp[gl + ' log inexactness']]\n\n for gl_other in opLabels:\n gl_other = str(gl_other)\n rotnAngle = decomp[gl + ' angle'].value\n rotnAngle_other = decomp[gl_other + ' angle'].value\n\n if gl_other == gl:\n rowData.append(\"\")\n elif abs(rotnAngle) < 1e-4 or abs(rotnAngle_other) < 1e-4:\n rowData.append(\"--\")\n else:\n rowData.append(decomp[gl + ',' + gl_other + ' axis angle'])\n\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass OldGateDecompTable(WorkspaceTable):\n \"\"\"\n 1-qubit-only table of gate decompositions\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n A single-qubit `Model`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, model, confidence_region_info=None):\n \"\"\"\n Create table for decomposing a single-qubit model's gates.\n\n This table interprets the eigenvectors and eigenvalues of the\n gates to extract a rotation angle, axis, and various decay\n coefficients.\n\n Parameters\n ----------\n model : Model\n A single-qubit `Model`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(OldGateDecompTable, self).__init__(ws, self._create, model, confidence_region_info)\n\n def _create(self, model, confidence_region_info):\n\n opLabels = model.primitive_op_labels # operation labels\n colHeadings = ('Gate', 'Eigenvalues', 'Fixed pt', 'Rotn. axis', 'Diag. decay', 'Off-diag. 
decay')\n formatters = [None] * 6\n\n assert(isinstance(model, _models.ExplicitOpModel)), \"OldGateDecompTable only works with explicit models\"\n decomps = [_reportables.decomposition(model.operations[gl]) for gl in opLabels]\n decompNames = ('fixed point',\n 'axis of rotation',\n 'decay of diagonal rotation terms',\n 'decay of off diagonal rotation terms')\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n formatters = (None, 'Vec', 'Normal', 'Normal', 'Normal', 'Normal')\n\n for decomp, gl in zip(decomps, opLabels):\n evals = _ev(_reportables.GateEigenvalues(model, gl))\n decomp, decompEB = decomp.value_and_errorbar # OLD\n\n rowData = [gl, evals] + [decomp.get(x, 'X') for x in decompNames[0:2]] + \\\n [(decomp.get(x, 'X'), decompEB) for x in decompNames[2:4]]\n\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass OldRotationAxisTable(WorkspaceTable):\n \"\"\"\n 1-qubit-only table of gate rotation angles and axes\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n A single-qubit `Model`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n show_axis_angle_err_bars : bool, optional\n Whether or not table should include error bars on the angles\n between rotation axes (doing so makes the table take up more\n space).\n \"\"\"\n\n def __init__(self, ws, model, confidence_region_info=None, show_axis_angle_err_bars=True):\n \"\"\"\n Create a table of the angle between a gate rotation axes for\n gates belonging to a single-qubit model.\n\n Parameters\n ----------\n model : Model\n A single-qubit `Model`.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n show_axis_angle_err_bars : bool, optional\n Whether or not table should include error bars on the angles\n between rotation axes (doing so makes the table take up more\n space).\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(OldRotationAxisTable, self).__init__(ws, self._create, model, confidence_region_info,\n show_axis_angle_err_bars)\n\n def _create(self, model, confidence_region_info, show_axis_angle_err_bars):\n\n opLabels = model.primitive_op_labels\n\n assert(isinstance(model, _models.ExplicitOpModel)), \"OldRotationAxisTable only works with explicit models\"\n decomps = [_reportables.decomposition(model.operations[gl]) for gl in opLabels]\n\n colHeadings = (\"Gate\", \"Angle\") + tuple([\"RAAW(%s)\" % gl for gl in opLabels])\n nCols = len(colHeadings)\n formatters = [None] * nCols\n\n table = \"tabular\"\n latex_head = \"\\\\begin{%s}[l]{%s}\\n\\hline\\n\" % (table, \"|c\" * nCols + \"|\")\n latex_head += \"\\\\multirow{2}{*}{Gate} & \\\\multirow{2}{*}{Angle} & \" + \\\n \"\\\\multicolumn{%d}{c|}{Angle between Rotation Axes} \\\\\\\\ \\cline{3-%d}\\n\" % (len(opLabels), nCols)\n latex_head += \" & & %s \\\\\\\\ \\hline\\n\" % (\" & \".join(map(str, opLabels)))\n\n table = _ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head}, confidence_region_info=confidence_region_info)\n\n formatters = [None, 'Pi'] + ['Pi'] * len(opLabels)\n\n rotnAxisAnglesQty = _ev(_reportables.Angles_btwn_rotn_axes(model),\n confidence_region_info)\n rotnAxisAngles, rotnAxisAnglesEB = rotnAxisAnglesQty.value_and_errorbar\n\n for i, gl in enumerate(opLabels):\n decomp, decompEB = decomps[i].value_and_errorbar # OLD\n rotnAngle = 
decomp.get('pi rotations', 'X')\n\n angles_btwn_rotn_axes = []\n for j, gl_other in enumerate(opLabels):\n decomp_other, _ = decomps[j].value_and_errorbar # OLD\n rotnAngle_other = decomp_other.get('pi rotations', 'X')\n\n if gl_other == gl:\n angles_btwn_rotn_axes.append((\"\", None))\n elif str(rotnAngle) == 'X' or abs(rotnAngle) < 1e-4 or \\\n str(rotnAngle_other) == 'X' or abs(rotnAngle_other) < 1e-4:\n angles_btwn_rotn_axes.append((\"--\", None))\n elif not _np.isnan(rotnAxisAngles[i, j]):\n if show_axis_angle_err_bars and rotnAxisAnglesEB is not None:\n angles_btwn_rotn_axes.append((rotnAxisAngles[i, j], rotnAxisAnglesEB[i, j]))\n else:\n angles_btwn_rotn_axes.append((rotnAxisAngles[i, j], None))\n else:\n angles_btwn_rotn_axes.append((\"X\", None))\n\n if confidence_region_info is None or decompEB is None: # decompEB is None when gate decomp failed\n rowData = [gl, (rotnAngle, None)] + angles_btwn_rotn_axes\n else:\n rowData = [gl, (rotnAngle, decompEB.get('pi rotations', 'X'))] + angles_btwn_rotn_axes\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass GateEigenvalueTable(WorkspaceTable):\n \"\"\"\n Table displaying, in a variety of ways, the eigenvalues of a Model's gates.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The Model\n\n target_model : Model, optional\n The target model. If given, the target's eigenvalue will\n be plotted alongside `model`'s gate eigenvalue, the\n \"relative eigenvalues\".\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple\n A tuple of one or more of the allowed options (see below) which\n specify which columns are displayed in the table. If\n `target_model` is None, then `\"target\"`, `\"rel\"`, `\"log-rel\"`\n `\"relpolar\"`, `\"gidm\"`, and `\"giinf\"` will be silently ignored.\n\n - \"evals\" : the gate eigenvalues\n - \"target\" : the target gate eigenvalues\n - \"rel\" : the relative-gate eigenvalues\n - \"log-evals\" : the (complex) logarithm of the eigenvalues\n - \"log-rel\" : the (complex) logarithm of the relative eigenvalues\n - \"polar\": a polar plot of the gate eigenvalues\n - \"relpolar\" : a polar plot of the relative-gate eigenvalues\n - \"absdiff-evals\" : absolute difference w/target eigenvalues\n - \"infdiff-evals\" : 1-Re(z0.C*z) difference w/target eigenvalues\n - \"absdiff-log-evals\" : Re & Im differences in eigenvalue logarithms\n - \"evdm\" : the gauge-invariant \"eigenvalue diamond norm\" metric\n - \"evinf\" : the gauge-invariant \"eigenvalue infidelity\" metric\n\n virtual_ops : list, optional\n If not None, a list of `Circuit` objects specifying additional \"gates\"\n (i.e. processes) to compute eigenvalues of. Length-1 circuits are\n automatically discarded so they are not displayed twice.\n \"\"\"\n\n def __init__(self, ws, model, target_model=None,\n confidence_region_info=None,\n display=('evals', 'rel', 'log-evals', 'log-rel', 'polar', 'relpolar'),\n virtual_ops=None):\n \"\"\"\n Create table which lists and displays (using a polar plot)\n the eigenvalues of a model's gates.\n\n Parameters\n ----------\n model : Model\n The Model\n\n target_model : Model, optional\n The target model. 
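# Aside (illustrative sketch): several of the display options listed above
# ("rel", "absdiff-evals", ...) compare estimated eigenvalues against target
# eigenvalues only *after* pairing them up by a min-weight matching (inside
# pyGSTi this is `_tools.minweight_match`, used later in this class).  The
# same pairing can be sketched with scipy's assignment solver; the
# "absdiff-evals" column is then just |e - t| over the matched pairs.
import numpy as _np
from scipy.optimize import linear_sum_assignment as _lsa

def _toy_matched_absdiff(evals, target_evals):
    """Pair eigenvalues with targets (minimizing total |e - t|), return |e - t|."""
    evals = _np.asarray(evals)
    target_evals = _np.asarray(target_evals)
    cost = _np.abs(evals[:, None] - target_evals[None, :])  # pairwise distances
    rows, cols = _lsa(cost)                                  # optimal pairing
    return _np.abs(evals[rows] - target_evals[cols])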
If given, the target's eigenvalue will\n be plotted alongside `model`'s gate eigenvalue, the\n \"relative eigenvalues\".\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n display : tuple\n A tuple of one or more of the allowed options (see below) which\n specify which columns are displayed in the table. If\n `target_model` is None, then `\"target\"`, `\"rel\"`, `\"log-rel\"`\n `\"relpolar\"`, `\"gidm\"`, and `\"giinf\"` will be silently ignored.\n\n - \"evals\" : the gate eigenvalues\n - \"target\" : the target gate eigenvalues\n - \"rel\" : the relative-gate eigenvalues\n - \"log-evals\" : the (complex) logarithm of the eigenvalues\n - \"log-rel\" : the (complex) logarithm of the relative eigenvalues\n - \"polar\": a polar plot of the gate eigenvalues\n - \"relpolar\" : a polar plot of the relative-gate eigenvalues\n - \"absdiff-evals\" : absolute difference w/target eigenvalues\n - \"infdiff-evals\" : 1-Re(z0.C*z) difference w/target eigenvalues\n - \"absdiff-log-evals\" : Re & Im differences in eigenvalue logarithms\n - \"evdm\" : the gauge-invariant \"eigenvalue diamond norm\" metric\n - \"evinf\" : the gauge-invariant \"eigenvalue infidelity\" metric\n\n virtual_ops : list, optional\n If not None, a list of `Circuit` objects specifying additional \"gates\"\n (i.e. processes) to compute eigenvalues of. Length-1 circuits are\n automatically discarded so they are not displayed twice.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GateEigenvalueTable, self).__init__(ws, self._create, model,\n target_model,\n confidence_region_info, display,\n virtual_ops)\n\n def _create(self, model, target_model,\n confidence_region_info, display,\n virtual_ops):\n\n opLabels = model.primitive_op_labels # operation labels\n assert(isinstance(model, _models.ExplicitOpModel)), \"GateEigenvalueTable only works with explicit models\"\n\n colHeadings = ['Gate'] if (virtual_ops is None) else ['Gate or Germ']\n formatters = [None]\n for disp in display:\n if disp == \"evals\":\n colHeadings.append('Eigenvalues ($E$)')\n formatters.append(None)\n\n elif disp == \"target\":\n if target_model is not None: # silently ignore\n colHeadings.append('Target Evals. ($T$)')\n formatters.append(None)\n\n elif disp == \"rel\":\n if target_model is not None: # silently ignore\n colHeadings.append('Rel. Evals ($R$)')\n formatters.append(None)\n\n elif disp == \"log-evals\":\n colHeadings.append('Re log(E)')\n colHeadings.append('Im log(E)')\n formatters.append('MathText')\n formatters.append('MathText')\n\n elif disp == \"log-rel\":\n colHeadings.append('Re log(R)')\n colHeadings.append('Im log(R)')\n formatters.append('MathText')\n formatters.append('MathText')\n\n elif disp == \"polar\":\n colHeadings.append('Eigenvalues') # Note: make sure header is *distinct* for pandas conversion\n formatters.append(None)\n\n elif disp == \"relpolar\":\n if(target_model is not None): # silently ignore\n colHeadings.append('Rel. 
Evals') # Note: make sure header is *distinct* for pandas conversion\n formatters.append(None)\n\n elif disp == \"absdiff-evals\":\n if(target_model is not None): # silently ignore\n colHeadings.append('|E - T|')\n formatters.append('MathText')\n\n elif disp == \"infdiff-evals\":\n if(target_model is not None): # silently ignore\n colHeadings.append('1.0 - Re(\\\\bar{T}*E)')\n formatters.append('MathText')\n\n elif disp == \"absdiff-log-evals\":\n if(target_model is not None): # silently ignore\n colHeadings.append('|Re(log E) - Re(log T)|')\n colHeadings.append('|Im(log E) - Im(log T)|')\n formatters.append('MathText')\n formatters.append('MathText')\n\n elif disp == \"evdm\":\n if(target_model is not None): # silently ignore\n colHeadings.append('Eigenvalue Diamond norm')\n formatters.append('Conversion')\n\n elif disp == \"evinf\":\n if(target_model is not None): # silently ignore\n colHeadings.append('Eigenvalue infidelity')\n formatters.append(None)\n else:\n raise ValueError(\"Invalid display element: %s\" % disp)\n\n table = _ReportTable(colHeadings, formatters, confidence_region_info=confidence_region_info)\n\n if virtual_ops is None:\n iterOver = opLabels\n else:\n iterOver = opLabels + tuple((v for v in virtual_ops if len(v) > 1))\n\n for gl in iterOver:\n #Note: gl may be a operation label (a string) or a Circuit\n row_data = [str(gl)]\n row_formatters = [None]\n\n #import time as _time #DEBUG\n #tStart = _time.time() #DEBUG\n fn = _reportables.GateEigenvalues if \\\n isinstance(gl, _baseobjs.Label) or isinstance(gl, str) else \\\n _reportables.CircuitEigenvalues\n evals = _ev(fn(model, gl), confidence_region_info)\n #tm = _time.time() - tStart #DEBUG\n #if tm > 0.01: print(\"DB: Gate eigenvalues in %gs\" % tm) #DEBUG\n\n evals = evals.reshape(evals.size, 1)\n #OLD: format to 2-columns - but polar plots are big, so just stick to 1col now\n #try: evals = evals.reshape(evals.size//2, 2) #assumes len(evals) is even!\n #except: evals = evals.reshape(evals.size, 1)\n\n if target_model is not None:\n #TODO: move this to a reportable qty to get error bars?\n\n if isinstance(gl, _baseobjs.Label) or isinstance(gl, str):\n # no error bars\n target_evals = _np.linalg.eigvals(target_model.operations[gl].to_dense(on_space='HilbertSchmidt'))\n else:\n target_evals = _np.linalg.eigvals(target_model.sim.product(gl)) # no error bars\n\n if any([(x in display) for x in ('rel', 'log-rel', 'relpolar')]):\n if isinstance(gl, _baseobjs.Label) or isinstance(gl, str):\n rel_evals = _ev(_reportables.Rel_gate_eigenvalues(model, target_model, gl),\n confidence_region_info)\n else:\n rel_evals = _ev(_reportables.Rel_circuit_eigenvalues(\n model, target_model, gl), confidence_region_info)\n\n # permute target eigenvalues according to min-weight matching\n _, pairs = _tools.minweight_match(evals.value, target_evals, lambda x, y: abs(x - y))\n matched_target_evals = target_evals.copy()\n for i, j in pairs:\n matched_target_evals[i] = target_evals[j]\n target_evals = matched_target_evals\n target_evals = target_evals.reshape(evals.value.shape)\n # b/c evals have shape (x,1) and targets (x,),\n # which causes problems when we try to subtract them\n\n for disp in display:\n if disp == \"evals\":\n row_data.append(evals)\n row_formatters.append('Normal')\n\n elif disp == \"target\" and target_model is not None:\n row_data.append(target_evals)\n row_formatters.append('Normal')\n\n elif disp == \"rel\" and target_model is not None:\n row_data.append(rel_evals)\n row_formatters.append('Normal')\n\n elif disp == 
\"log-evals\":\n logevals = evals.log()\n row_data.append(logevals.real())\n row_data.append(logevals.imag() / _np.pi)\n row_formatters.append('Normal')\n row_formatters.append('Pi')\n\n elif disp == \"log-rel\":\n log_relevals = rel_evals.log()\n row_data.append(log_relevals.real())\n row_data.append(log_relevals.imag() / _np.pi)\n row_formatters.append('Vec')\n row_formatters.append('Pi')\n\n elif disp == \"absdiff-evals\" and target_model is not None:\n absdiff_evals = evals.absdiff(target_evals)\n row_data.append(absdiff_evals)\n row_formatters.append('Vec')\n\n elif disp == \"infdiff-evals\" and target_model is not None:\n infdiff_evals = evals.infidelity_diff(target_evals)\n row_data.append(infdiff_evals)\n row_formatters.append('Vec')\n\n elif disp == \"absdiff-log-evals\" and target_model is not None:\n log_evals = evals.log()\n re_diff, im_diff = log_evals.absdiff(_np.log(target_evals.astype(complex)), separate_re_im=True)\n row_data.append(re_diff)\n row_data.append((im_diff / _np.pi).mod(2.0))\n row_formatters.append('Vec')\n row_formatters.append('Pi')\n\n elif disp == \"evdm\":\n if target_model is not None:\n fn = _reportables.Eigenvalue_diamondnorm if \\\n isinstance(gl, _baseobjs.Label) or isinstance(gl, str) else \\\n _reportables.Circuit_eigenvalue_diamondnorm\n gidm = _ev(fn(model, target_model, gl), confidence_region_info)\n row_data.append(gidm)\n row_formatters.append('Normal')\n\n elif disp == \"evinf\":\n if target_model is not None:\n fn = _reportables.Eigenvalue_entanglement_infidelity if \\\n isinstance(gl, _baseobjs.Label) or isinstance(gl, str) else \\\n _reportables.Circuit_eigenvalue_entanglement_infidelity\n giinf = _ev(fn(model, target_model, gl), confidence_region_info)\n row_data.append(giinf)\n row_formatters.append('Normal')\n\n elif disp == \"polar\":\n evals_val = evals.value\n if target_model is None:\n fig = _wp.PolarEigenvaluePlot(\n self.ws, [evals_val], [\"blue\"], center_text=str(gl))\n else:\n fig = _wp.PolarEigenvaluePlot(\n self.ws, [target_evals, evals_val],\n [\"black\", \"blue\"], [\"target\", \"gate\"], center_text=str(gl))\n row_data.append(fig)\n row_formatters.append('Figure')\n\n elif disp == \"relpolar\" and target_model is not None:\n rel_evals_val = rel_evals.value\n fig = _wp.PolarEigenvaluePlot(\n self.ws, [rel_evals_val], [\"red\"], [\"rel\"], center_text=str(gl))\n row_data.append(fig)\n row_formatters.append('Figure')\n table.add_row(row_data, row_formatters)\n\n #Iterate over instruments\n for il, inst in model.instruments.items():\n tinst = target_model.instruments[il]\n for comp_lbl, comp in inst.items():\n tcomp = tinst[comp_lbl]\n\n row_data = [il + \".\" + comp_lbl]\n row_formatters = [None]\n\n #FUTURE: use reportables to get instrument eigenvalues\n evals = _ReportableQty(_np.linalg.eigvals(comp.to_dense(on_space='HilbertSchmidt')))\n evals = evals.reshape(evals.size, 1)\n\n if target_model is not None:\n target_evals = _np.linalg.eigvals(tcomp.to_dense(on_space='HilbertSchmidt')) # no error bars\n #Note: no support for relative eigenvalues of instruments (yet)\n\n # permute target eigenvalues according to min-weight matching\n _, pairs = _tools.minweight_match(evals.value, target_evals, lambda x, y: abs(x - y))\n matched_target_evals = target_evals.copy()\n for i, j in pairs:\n matched_target_evals[i] = target_evals[j]\n target_evals = matched_target_evals\n target_evals = target_evals.reshape(evals.value.shape)\n # b/c evals have shape (x,1) and targets (x,),\n # which causes problems when we try to subtract them\n\n 
for disp in display:\n if disp == \"evals\":\n row_data.append(evals)\n row_formatters.append('Normal')\n\n elif disp == \"target\" and target_model is not None:\n row_data.append(target_evals)\n row_formatters.append('Normal')\n\n elif disp == \"rel\" and target_model is not None:\n row_data.append(_np.nan)\n row_formatters.append('Normal')\n\n elif disp == \"log-evals\":\n logevals = evals.log()\n row_data.append(logevals.real())\n row_data.append(logevals.imag() / _np.pi)\n row_formatters.append('Normal')\n row_formatters.append('Pi')\n\n elif disp == \"log-rel\":\n row_data.append(_np.nan)\n row_formatters.append('Normal')\n\n elif disp == \"absdiff-evals\":\n absdiff_evals = evals.absdiff(target_evals)\n row_data.append(absdiff_evals)\n row_formatters.append('Vec')\n\n elif disp == \"infdiff-evals\":\n infdiff_evals = evals.infidelity_diff(target_evals)\n row_data.append(infdiff_evals)\n row_formatters.append('Vec')\n\n elif disp == \"absdiff-log-evals\":\n log_evals = evals.log()\n re_diff, im_diff = log_evals.absdiff(_np.log(target_evals.astype(complex)), separate_re_im=True)\n row_data.append(re_diff)\n row_data.append((im_diff / _np.pi).mod(2.0))\n row_formatters.append('Vec')\n row_formatters.append('Pi')\n\n elif disp == \"evdm\":\n row_data.append(_np.nan)\n row_formatters.append('Normal')\n\n elif disp == \"evinf\":\n row_data.append(_np.nan)\n row_formatters.append('Normal')\n\n elif disp == \"polar\":\n evals_val = evals.value\n if target_model is None:\n fig = _wp.PolarEigenvaluePlot(\n self.ws, [evals_val], [\"blue\"], center_text=str(gl))\n else:\n fig = _wp.PolarEigenvaluePlot(\n self.ws, [target_evals, evals_val],\n [\"black\", \"blue\"], [\"target\", \"gate\"], center_text=str(gl))\n row_data.append(fig)\n row_formatters.append('Figure')\n\n elif disp == \"relpolar\" and target_model is not None:\n row_data.append(_np.nan)\n row_formatters.append('Normal')\n row_formatters.append('Figure')\n\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass DataSetOverviewTable(WorkspaceTable):\n \"\"\"\n Table giving a summary of the properties of `dataset`.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n dataset : DataSet\n The DataSet\n\n max_length_list : list of ints, optional\n A list of the maximum lengths used, if available.\n \"\"\"\n\n def __init__(self, ws, dataset, max_length_list=None):\n \"\"\"\n Create a table that gives a summary of the properties of `dataset`.\n\n Parameters\n ----------\n dataset : DataSet\n The DataSet\n\n max_length_list : list of ints, optional\n A list of the maximum lengths used, if available.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(DataSetOverviewTable, self).__init__(ws, self._create, dataset, max_length_list)\n\n def _create(self, dataset, max_length_list):\n\n colHeadings = ('Quantity', 'Value')\n formatters = (None, None)\n\n table = _ReportTable(colHeadings, formatters)\n\n minN = round(min([row.total for row in dataset.values()]))\n maxN = round(max([row.total for row in dataset.values()]))\n cntStr = \"[%d,%d]\" % (minN, maxN) if (minN != maxN) else \"%d\" % round(minN)\n\n table.add_row((\"Number of strings\", str(len(dataset))), (None, None))\n table.add_row((\"Gate labels\", \", \".join([str(gl) for gl in dataset.gate_labels()])), (None, None))\n table.add_row((\"Outcome labels\", \", \".join(map(str, dataset.outcome_labels))), (None, None))\n table.add_row((\"Counts per string\", cntStr), (None, None))\n\n if max_length_list is not None:\n 
table.add_row((\"Max. Lengths\", \", \".join(map(str, max_length_list))), (None, None))\n if hasattr(dataset, 'comment') and dataset.comment is not None:\n commentLines = dataset.comment.split('\\n')\n for i, commentLine in enumerate(commentLines, start=1):\n table.add_row((\"User comment %d\" % i, commentLine), (None, 'Verbatim'))\n\n table.finish()\n return table\n\n\nclass FitComparisonTable(WorkspaceTable):\n \"\"\"\n Table showing how the goodness-of-fit evolved over GST iterations\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n xs : list of integers\n List of X-values. Typically these are the maximum lengths or\n exponents used to index the different iterations of GST.\n\n circuits_by_x : list of (CircuitLists or lists of Circuits)\n Specifies the set of circuits used at each X.\n\n model_by_x : list of Models\n `Model`s corresponding to each X value.\n\n dataset : DataSet\n The data set to compare each model against.\n\n objfn_builder : ObjectiveFunctionBuilder or {\"logl\", \"chi2\"}, optional\n The objective function to use, or one of the given strings\n to use a defaut log-likelihood or chi^2 function.\n\n x_label : str, optional\n A label for the 'X' variable which indexes the different models.\n This string will be the header of the first table column.\n\n np_by_x : list of ints, optional\n A list of parameter counts to use for each X. If None, then\n the number of non-gauge parameters for each model is used.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n wildcard : WildcardBudget\n A wildcard budget to apply to the objective function (`objective`),\n which increases the goodness of fit by adjusting (by an amount\n measured in TVD) the probabilities produced by a model before\n comparing with the frequencies in `dataset`. Currently, this\n functionality is only supported for `objective == \"logl\"`.\n \"\"\"\n\n def __init__(self, ws, xs, circuits_by_x, model_by_x, dataset, objfn_builder='logl',\n x_label='L', np_by_x=None, comm=None, wildcard=None):\n \"\"\"\n Create a table showing how the chi^2 or log-likelihood changed with\n successive GST iterations.\n\n Parameters\n ----------\n xs : list of integers\n List of X-values. Typically these are the maximum lengths or\n exponents used to index the different iterations of GST.\n\n circuits_by_x : list of (CircuitLists or lists of Circuits)\n Specifies the set of circuits used at each X.\n\n model_by_x : list of Models\n `Model`s corresponding to each X value.\n\n dataset : DataSet\n The data set to compare each model against.\n\n objfn_builder : ObjectiveFunctionBuilder or {\"logl\", \"chi2\"}, optional\n The objective function to use, or one of the given strings\n to use a defaut log-likelihood or chi^2 function.\n\n x_label : str, optional\n A label for the 'X' variable which indexes the different models.\n This string will be the header of the first table column.\n\n np_by_x : list of ints, optional\n A list of parameter counts to use for each X. 
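# Aside (illustrative note): the "k", "chi^2 - k" / "2*Delta(log L) - k" and
# "sqrt{2k}" columns built below rest on the fact that, for a model that fits
# the data well, the fit quantity is approximately chi^2-distributed with k
# degrees of freedom -- mean k and standard deviation sqrt(2k).  N_sigma then
# measures how many such standard deviations the observed value sits above
# its expectation.  (k = 1000 below is just a hypothetical example value.)
from scipy.stats import chi2 as _chi2
k = 1000                                              # hypothetical dof count
assert _chi2.mean(k) == k                             # expected fit quantity
assert abs(_chi2.std(k) - (2 * k) ** 0.5) < 1e-9      # its standard deviation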
If None, then\n the number of non-gauge parameters for each model is used.\n\n comm : mpi4py.MPI.Comm, optional\n When not None, an MPI communicator for distributing the computation\n across multiple processors.\n\n wildcard : WildcardBudget\n A wildcard budget to apply to the objective function (`objective`),\n which increases the goodness of fit by adjusting (by an amount\n measured in TVD) the probabilities produced by a model before\n comparing with the frequencies in `dataset`. Currently, this\n functionality is only supported for `objective == \"logl\"`.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(FitComparisonTable, self).__init__(ws, self._create, xs, circuits_by_x, model_by_x,\n dataset, objfn_builder, x_label, np_by_x, comm,\n wildcard)\n\n def _create(self, xs, circuits_by_x, model_by_x, dataset, objfn_builder, x_label, np_by_x, comm, wildcard):\n\n if objfn_builder == \"chi2\" or (isinstance(objfn_builder, _objfns.ObjectiveFunctionBuilder)\n and objfn_builder.cls_to_build == _objfns.Chi2Function):\n colHeadings = {\n 'latex': (x_label, '$\\\\chi^2$', '$k$', '$\\\\chi^2-k$', '$\\sqrt{2k}$',\n '$N_\\\\sigma$', '$N_s$', '$N_p$', 'Rating'),\n 'html': (x_label, '&chi;<sup>2</sup>', 'k', '&chi;<sup>2</sup>-k',\n '&radic;<span style=\"text-decoration:overline;\">2k</span>',\n 'N<sub>sigma</sub>', 'N<sub>s</sub>', 'N<sub>p</sub>', 'Rating'),\n 'python': (x_label, 'chi^2', 'k', 'chi^2-k', 'sqrt{2k}', 'N_{sigma}', 'N_s', 'N_p', 'Rating')\n }\n\n elif objfn_builder == \"logl\" or (isinstance(objfn_builder, _objfns.ObjectiveFunctionBuilder)\n and objfn_builder.cls_to_build == _objfns.PoissonPicDeltaLogLFunction):\n colHeadings = {\n 'latex': (x_label, '$2\\Delta\\\\log(\\\\mathcal{L})$', '$k$', '$2\\Delta\\\\log(\\\\mathcal{L})-k$',\n '$\\sqrt{2k}$', '$N_\\\\sigma$', '$N_s$', '$N_p$', 'Rating'),\n 'html': (x_label, '2&Delta;(log L)', 'k', '2&Delta;(log L)-k',\n '&radic;<span style=\"text-decoration:overline;\">2k</span>',\n 'N<sub>sigma</sub>', 'N<sub>s</sub>', 'N<sub>p</sub>', 'Rating'),\n 'python': (x_label, '2*Delta(log L)', 'k', '2*Delta(log L)-k', 'sqrt{2k}',\n 'N_{sigma}', 'N_s', 'N_p', 'Rating')\n }\n else:\n raise ValueError(\"Invalid `objfn_builder` argument: %s\" % str(objfn_builder))\n\n if np_by_x is None:\n np_by_x = [mdl.num_modeltest_params for mdl in model_by_x]\n\n tooltips = ('', 'Difference in logL', 'number of degrees of freedom',\n 'difference between observed logl and expected mean',\n 'std deviation', 'number of std deviation', 'dataset dof',\n 'number of model parameters', '1-5 star rating (like Netflix)')\n table = _ReportTable(colHeadings, None, col_heading_labels=tooltips)\n\n for X, mdl, circuits, Np in zip(xs, model_by_x, circuits_by_x, np_by_x):\n Nsig, rating, fitQty, k, Ns, Np = self._ccompute(\n _ph.rated_n_sigma, dataset, mdl, circuits,\n objfn_builder, Np, wildcard, return_all=True,\n comm=comm) # self.ws.smartCache derived?\n table.add_row((str(X), fitQty, k, fitQty - k, _np.sqrt(2 * k), Nsig, Ns, Np, \"<STAR>\" * rating),\n (None, 'Normal', 'Normal', 'Normal', 'Normal', 'Rounded', 'Normal', 'Normal', 'Conversion'))\n\n table.finish()\n return table\n\n\nclass CircuitTable(WorkspaceTable):\n \"\"\"\n Table which simply displays list(s) of circuits\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n circuit_lists : Circuit list or list of Circuit lists\n List(s) of circuits to put in table.\n\n titles : string or list of strings\n The title(s) for the different string lists. 
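# Aside (illustrative sketch): `_ph.rated_n_sigma`, used in the _create above,
# returns both the number of standard deviations by which the fit misses its
# expectation and a 1-5 star rating.  A toy stand-in is shown here; the
# cut-offs below are hypothetical placeholders, not pyGSTi's actual rating
# rules.
import numpy as _np

def _toy_n_sigma_and_rating(fit_qty, k):
    """N_sigma = (fit_qty - k)/sqrt(2k), plus a made-up 1-5 star rating."""
    n_sigma = (fit_qty - k) / _np.sqrt(2.0 * k)
    thresholds = (2, 20, 100, 500)                    # hypothetical cut-offs
    rating = 5 - sum(n_sigma > t for t in thresholds)
    return n_sigma, rating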
These are displayed in\n the relevant table columns containing the strings.\n\n num_cols : int, optional\n The number of *data* columns, i.e. those containing\n circuits, for each string list.\n\n common_title : string, optional\n A single title string to place in a cell spanning across\n all the other column headers.\n \"\"\"\n\n def __init__(self, ws, circuit_lists, titles, num_cols=1, common_title=None):\n \"\"\"\n Creates a table of enumerating one or more sets of circuits.\n\n Parameters\n ----------\n circuit_lists : Circuit list or list of Circuit lists\n List(s) of circuits to put in table.\n\n titles : string or list of strings\n The title(s) for the different string lists. These are displayed in\n the relevant table columns containing the strings.\n\n num_cols : int, optional\n The number of *data* columns, i.e. those containing\n circuits, for each string list.\n\n common_title : string, optional\n A single title string to place in a cell spanning across\n all the other column headers.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(CircuitTable, self).__init__(ws, self._create, circuit_lists, titles,\n num_cols, common_title)\n\n def _create(self, circuit_lists, titles, num_cols, common_title):\n\n if len(circuit_lists) == 0:\n circuit_lists = [[]]\n elif isinstance(circuit_lists[0], _Circuit) or \\\n (isinstance(circuit_lists[0], tuple) and isinstance(circuit_lists[0][0], str)):\n circuit_lists = [circuit_lists]\n\n if isinstance(titles, str): titles = [titles] * len(circuit_lists)\n\n colHeadings = (('#',) + tuple(titles)) * num_cols\n formatters = (('Conversion',) + ('Normal',) * len(titles)) * num_cols\n\n if common_title is None:\n table = _ReportTable(colHeadings, formatters)\n else:\n table = \"tabular\"\n colHeadings = ('\\\\#',) + tuple(titles)\n latex_head = \"\\\\begin{%s}[l]{%s}\\n\\hline\\n\" % (table, \"|c\" * len(colHeadings) + \"|\")\n latex_head += \" & \\multicolumn{%d}{c|}{%s} \\\\\\\\ \\hline\\n\" % (len(colHeadings) - 1, common_title)\n latex_head += \"%s \\\\\\\\ \\hline\\n\" % (\" & \".join(colHeadings))\n\n colHeadings = ('#',) + tuple(titles)\n html_head = '<table class=\"%(tableclass)s\" id=\"%(tableid)s\" ><thead>'\n html_head += '<tr><th></th><th colspan=\"%d\">%s</th></tr>\\n' % (len(colHeadings) - 1, common_title)\n html_head += \"<tr><th> %s </th></tr>\" % (\" </th><th> \".join(colHeadings))\n html_head += \"</thead><tbody>\"\n table = _ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head,\n 'html': html_head})\n\n formatters = (('Normal',) + ('Circuit',) * len(circuit_lists)) * num_cols\n\n maxListLength = max(list(map(len, circuit_lists)))\n nRows = (maxListLength + (num_cols - 1)) // num_cols # ceiling\n\n #for i in range( max([len(gsl) for gsl in circuit_lists]) ):\n for i in range(nRows):\n rowData = []\n for k in range(num_cols):\n l = i + nRows * k # index of circuit\n rowData.append(l + 1)\n for gsList in circuit_lists:\n if l < len(gsList):\n rowData.append(gsList[l])\n else:\n rowData.append(None) # empty string\n table.add_row(rowData, formatters)\n\n table.finish()\n return table\n\n\nclass GatesSingleMetricTable(WorkspaceTable):\n \"\"\"\n Table that compares the gates of many models to target models using a single metric (`metric`).\n\n This allows the model titles to be used as the row and column headers. 
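# Aside (illustrative sketch): the `nRows = (maxListLength + num_cols - 1)
# // num_cols` and `l = i + nRows * k` arithmetic in the _create above folds
# a flat list of circuits into `num_cols` side-by-side columns, filling
# column-by-column.  A tiny standalone version of that layout:
def _toy_fold_into_columns(items, num_cols):
    n_rows = (len(items) + num_cols - 1) // num_cols      # ceiling division
    return [[items[i + n_rows * k] if i + n_rows * k < len(items) else None
             for k in range(num_cols)]
            for i in range(n_rows)]

# _toy_fold_into_columns(list("abcdefg"), 2)
# -> [['a', 'e'], ['b', 'f'], ['c', 'g'], ['d', None]]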
The models\n must share the same gate labels.\n\n If `models` and `target_models` are 1D lists, then `rowtitles` and\n `op_label` should be left as their default values so that the\n operation labels are used as row headers.\n\n If `models` and `target_models` are 2D (nested) lists, then\n `rowtitles` should specify the row-titles corresponding to the outer list\n elements and `op_label` should specify a single operation label that names\n the gate being compared throughout the entire table.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n metric : str\n The abbreviation for the metric to use. Allowed values are:\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n\n models : list\n A list or nested list-of-lists of models to compare with\n corresponding elements of `target_models`.\n\n target_models : list\n A list or nested list-of-lists of models to compare with\n corresponding elements of `models`.\n\n titles : list of strs\n A list of column titles used to describe elements of the\n innermost list(s) in `models`.\n\n rowtitles : list of strs, optional\n A list of row titles used to describe elements of the\n outer list in `models`. If None, then the operation labels\n are used.\n\n table_title : str, optional\n If not None, text to place in a top header cell which spans all the\n columns of the table.\n\n op_label : str, optional\n If not None, the single operation label to use for all comparisons\n computed in this table. This should be set when (and only when)\n `models` and `target_models` are 2D (nested) lists.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n \"\"\"\n\n def __init__(self, ws, metric, models, target_models, titles,\n rowtitles=None, table_title=None, op_label=None,\n confidence_region_info=None):\n \"\"\"\n Create a table comparing the gates of various models (`models`) to\n those of `target_models` using the metric named by `metric`.\n\n If `models` and `target_models` are 1D lists, then `rowtitles` and\n `op_label` should be left as their default values so that the\n operation labels are used as row headers.\n\n If `models` and `target_models` are 2D (nested) lists, then\n `rowtitles` should specify the row-titles corresponding to the outer list\n elements and `op_label` should specify a single operation label that names\n the gate being compared throughout the entire table.\n\n Parameters\n ----------\n metric : str\n The abbreviation for the metric to use. 
Allowed values are:\n\n - \"inf\" : entanglement infidelity\n - \"agi\" : average gate infidelity\n - \"trace\" : 1/2 trace distance\n - \"diamond\" : 1/2 diamond norm distance\n - \"nuinf\" : non-unitary entanglement infidelity\n - \"nuagi\" : non-unitary entanglement infidelity\n - \"evinf\" : eigenvalue entanglement infidelity\n - \"evagi\" : eigenvalue average gate infidelity\n - \"evnuinf\" : eigenvalue non-unitary entanglement infidelity\n - \"evnuagi\" : eigenvalue non-unitary entanglement infidelity\n - \"evdiamond\" : eigenvalue 1/2 diamond norm distance\n - \"evnudiamond\" : eigenvalue non-unitary 1/2 diamond norm distance\n - \"frob\" : frobenius distance\n\n models : list\n A list or nested list-of-lists of models to compare with\n corresponding elements of `target_models`.\n\n target_models : list\n A list or nested list-of-lists of models to compare with\n corresponding elements of `models`.\n\n titles : list of strs\n A list of column titles used to describe elements of the\n innermost list(s) in `models`.\n\n rowtitles : list of strs, optional\n A list of row titles used to describe elements of the\n outer list in `models`. If None, then the operation labels\n are used.\n\n table_title : str, optional\n If not None, text to place in a top header cell which spans all the\n columns of the table.\n\n op_label : str, optional\n If not None, the single operation label to use for all comparisons\n computed in this table. This should be set when (and only when)\n `models` and `target_models` are 2D (nested) lists.\n\n confidence_region_info : ConfidenceRegion, optional\n If not None, specifies a confidence-region\n used to display error intervals.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GatesSingleMetricTable, self).__init__(\n ws, self._create, metric, models, target_models, titles,\n rowtitles, table_title, op_label, confidence_region_info)\n\n def _create(self, metric, models, target_models, titles,\n rowtitles, table_title, op_label, confidence_region_info):\n\n if rowtitles is None:\n assert(op_label is None), \"`op_label` must be None when `rowtitles` is\"\n colHeadings = (\"Gate\",) + tuple(titles)\n else:\n colHeadings = (\"\",) + tuple(titles)\n\n nCols = len(colHeadings)\n formatters = [None] * nCols # [None] + ['ModelType']*(nCols-1)\n\n #latex_head = \"\\\\begin{tabular}[l]{%s}\\n\\hline\\n\" % (\"|c\" * nCols + \"|\")\n #latex_head += \"\\\\multirow{2}{*}{Gate} & \" + \\\n # \"\\\\multicolumn{%d}{c|}{%s} \\\\\\\\ \\cline{2-%d}\\n\" % (len(titles),niceNm,nCols)\n #latex_head += \" & \" + \" & \".join([mknice(t) for t in titles]) + \"\\\\\\\\ \\hline\\n\"\n #\n #html_head = '<table class=\"%(tableclass)s\" id=\"%(tableid)s\" ><thead>'\n #html_head += '<tr><th rowspan=\"2\"></th>' + \\\n # '<th colspan=\"%d\">%s</th></tr>\\n' % (len(titles),niceNm)\n #html_head += \"<tr><th>\" + \" </th><th> \".join([mknice(t) for t in titles]) + \"</th></tr>\\n\"\n #html_head += \"</thead><tbody>\"\n\n if table_title:\n latex_head = \"\\\\begin{tabular}[l]{%s}\\n\\hline\\n\" % (\"|c\" * nCols + \"|\")\n latex_head += \"\\\\multicolumn{%d}{c|}{%s} \\\\\\\\ \\cline{1-%d}\\n\" % (nCols, table_title, nCols)\n latex_head += \" & \".join(colHeadings) + \"\\\\\\\\ \\hline\\n\"\n\n html_head = '<table class=\"%(tableclass)s\" id=\"%(tableid)s\" ><thead>'\n html_head += '<tr><th colspan=\"%d\">%s</th></tr>\\n' % (nCols, table_title)\n html_head += \"<tr><th>\" + \" </th><th> \".join(colHeadings) + \"</th></tr>\\n\"\n html_head += \"</thead><tbody>\"\n\n table = 
_ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head,\n 'html': html_head})\n else:\n table = _ReportTable(colHeadings, formatters)\n\n row_formatters = [None] + ['Normal'] * len(titles)\n\n if rowtitles is None:\n assert(isinstance(target_models[0], _models.ExplicitOpModel)\n ), \"%s only works with explicit models\" % str(type(self))\n for gl in target_models[0].operations: # use first target's operation labels\n row_data = [gl]\n for mdl, gsTarget in zip(models, target_models):\n if mdl is None or gsTarget is None:\n qty = _ReportableQty(_np.nan)\n else:\n qty = _reportables.evaluate_opfn_by_name(\n metric, mdl, gsTarget, gl, confidence_region_info)\n row_data.append(qty)\n table.add_row(row_data, row_formatters)\n else:\n for rowtitle, gsList, tgsList in zip(rowtitles, models, target_models):\n row_data = [rowtitle]\n for mdl, gsTarget in zip(gsList, tgsList):\n if mdl is None or gsTarget is None:\n qty = _ReportableQty(_np.nan)\n else:\n qty = _reportables.evaluate_opfn_by_name(\n metric, mdl, gsTarget, op_label, confidence_region_info)\n row_data.append(qty)\n table.add_row(row_data, row_formatters)\n\n table.finish()\n return table\n\n\nclass StandardErrgenTable(WorkspaceTable):\n \"\"\"\n A table showing what the standard error generators' superoperator matrices look like.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model_dim : int\n The dimension of the model, which equals the number of\n rows (or columns) in a operation matrix (e.g., 4 for a single qubit).\n\n projection_type : {\"hamiltonian\", \"stochastic\"}\n The type of error generator projectors to create a table for.\n If \"hamiltonian\", then use the Hamiltonian generators which take a\n density matrix rho -> -i*[ H, rho ] for basis matrix H.\n If \"stochastic\", then use the Stochastic error generators which take\n rho -> P*rho*P for basis matrix P (recall P is self adjoint).\n\n projection_basis : {'std', 'gm', 'pp', 'qt'}\n Which basis is used to construct the error generators. Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp)\n and Qutrit (qt).\n \"\"\"\n\n def __init__(self, ws, model_dim, projection_type,\n projection_basis):\n \"\"\"\n Create a table of the \"standard\" gate error generators, such as those\n which correspond to Hamiltonian or Stochastic errors. Each generator\n is shown as grid of colored boxes.\n\n Parameters\n ----------\n model_dim : int\n The dimension of the model, which equals the number of\n rows (or columns) in a operation matrix (e.g., 4 for a single qubit).\n\n projection_type : {\"hamiltonian\", \"stochastic\"}\n The type of error generator projectors to create a table for.\n If \"hamiltonian\", then use the Hamiltonian generators which take a\n density matrix rho -> -i*[ H, rho ] for basis matrix H.\n If \"stochastic\", then use the Stochastic error generators which take\n rho -> P*rho*P for basis matrix P (recall P is self adjoint).\n\n projection_basis : {'std', 'gm', 'pp', 'qt'}\n Which basis is used to construct the error generators. 
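# Aside (illustrative sketch): the Hamiltonian-type generators described
# above, rho -> -i*[H, rho], can be written as d^2 x d^2 superoperator
# matrices.  Assuming a column-stacking convention, vec(A X B) =
# kron(B.T, A) vec(X) -- pyGSTi's `_tools.std_error_generators` handles the
# basis and convention details -- a single such generator for a basis
# matrix H looks like this:
import numpy as _np

def _toy_hamiltonian_errgen(h_mx):
    """Superoperator for rho -> -1j*(H rho - rho H), column-stacked vec."""
    d = h_mx.shape[0]
    iden = _np.identity(d)
    return -1j * (_np.kron(iden, h_mx) - _np.kron(h_mx.T, iden))

# e.g. for the single-qubit Pauli Z basis matrix:
#   _toy_hamiltonian_errgen(_np.diag([1.0, -1.0]))  gives a 4x4 matrix;
# the stochastic-type generators map rho -> P*rho*P analogously, per the
# description above.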
Allowed\n values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp)\n and Qutrit (qt).\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(StandardErrgenTable, self).__init__(\n ws, self._create, model_dim, projection_type,\n projection_basis)\n\n def _create(self, model_dim, projection_type,\n projection_basis):\n\n d2 = model_dim # number of projections == dim of gate\n d = int(_np.sqrt(d2)) # dim of density matrix\n nQubits = _np.log2(d)\n\n #Get a list of the d2 generators (in corresspondence with the\n # given basis matrices)\n lindbladMxs = _tools.std_error_generators(d2, projection_type,\n projection_basis) # in std basis\n\n if not _np.isclose(round(nQubits), nQubits):\n #Non-integral # of qubits, so just show as a single row\n yd, xd = 1, d\n xlabel = \"\"; ylabel = \"\"\n elif nQubits == 1:\n yd, xd = 1, 2 # y and x pauli-prod *basis* dimensions\n xlabel = \"Q1\"; ylabel = \"\"\n elif nQubits == 2:\n yd, xd = 2, 2\n xlabel = \"Q2\"; ylabel = \"Q1\"\n else:\n assert(d % 2 == 0)\n yd, xd = 2, d // 2\n xlabel = \"Q*\"; ylabel = \"Q1\"\n\n topright = \"%s \\\\ %s\" % (ylabel, xlabel) if (len(ylabel) > 0) else \"\"\n colHeadings = [topright] + \\\n [(\"%s\" % x) if len(x) else \"\"\n for x in _tools.basis_element_labels(projection_basis, xd**2)]\n rowLabels = [(\"%s\" % x) if len(x) else \"\"\n for x in _tools.basis_element_labels(projection_basis, yd**2)]\n\n xLabels = _tools.basis_element_labels(projection_basis, xd**2)\n yLabels = _tools.basis_element_labels(projection_basis, yd**2)\n\n table = _ReportTable(colHeadings, [\"Conversion\"] + [None] * (len(colHeadings) - 1))\n\n iCur = 0\n for i, ylabel in enumerate(yLabels):\n rowData = [rowLabels[i]]\n rowFormatters = [None]\n\n for xlabel in xLabels:\n projector = lindbladMxs[iCur]; iCur += 1\n projector = _tools.change_basis(projector, \"std\", projection_basis)\n m, M = -_np.max(_np.abs(projector)), _np.max(_np.abs(projector))\n fig = _wp.GateMatrixPlot(self.ws, projector, m, M,\n projection_basis, d)\n rowData.append(fig)\n rowFormatters.append('Figure')\n\n table.add_row(rowData, rowFormatters)\n\n table.finish()\n return table\n\n\nclass GaugeOptParamsTable(WorkspaceTable):\n \"\"\"\n Table of gauge optimization parameters\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n gaugeopt_args : dict or list\n A dictionary or list of dictionaries specifying values for\n zero or more of the *arguments* of pyGSTi's\n :func:`gaugeopt_to_target` function.\n \"\"\"\n\n def __init__(self, ws, gaugeopt_args):\n \"\"\"\n Create a table displaying a list of gauge\n optimzation parameters.\n\n Parameters\n ----------\n gaugeopt_args : dict or list\n A dictionary or list of dictionaries specifying values for\n zero or more of the *arguments* of pyGSTi's\n :func:`gaugeopt_to_target` function.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(GaugeOptParamsTable, self).__init__(ws, self._create, gaugeopt_args)\n\n def _create(self, gaugeopt_args):\n\n colHeadings = ('G-Opt Param', 'Value')\n formatters = ('Bold', 'Bold')\n\n if gaugeopt_args is False: # signals *no* gauge optimization\n goargs_list = [{'Method': \"No gauge optimization was performed\"}]\n else:\n goargs_list = [gaugeopt_args] if hasattr(gaugeopt_args, 'keys') \\\n else gaugeopt_args\n\n table = _ReportTable(colHeadings, formatters)\n\n for i, goargs in enumerate(goargs_list):\n pre = (\"%d: \" % i) if len(goargs_list) > 1 else \"\"\n if 'method' in goargs:\n table.add_row((\"%sMethod\" % pre, str(goargs['method'])), (None, None))\n if 
'cptp_penalty_factor' in goargs and goargs['cptp_penalty_factor'] != 0:\n table.add_row((\"%sCP penalty factor\" % pre, str(goargs['cptp_penalty_factor'])), (None, None))\n if 'spam_penalty_factor' in goargs and goargs['spam_penalty_factor'] != 0:\n table.add_row((\"%sSPAM penalty factor\" % pre, str(goargs['spam_penalty_factor'])), (None, None))\n if 'gates_metric' in goargs:\n table.add_row((\"%sMetric for gate-to-target\" % pre, str(goargs['gates_metric'])), (None, None))\n if 'spam_metric' in goargs:\n table.add_row((\"%sMetric for SPAM-to-target\" % pre, str(goargs['spam_metric'])), (None, None))\n if 'item_weights' in goargs:\n if goargs['item_weights']:\n table.add_row(\n (\"%sItem weights\" % pre,\n \", \".join([(\"%s=%.2g\" % (k, v)) for k, v in goargs['item_weights'].items()])), (None, None))\n if 'gauge_group' in goargs:\n table.add_row((\"%sGauge group\" % pre, goargs['gauge_group'].name), (None, None))\n\n table.finish()\n return table\n\n\nclass MetadataTable(WorkspaceTable):\n \"\"\"\n Table of raw parameters, often taken directly from a `Results` object\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n model : Model\n The model (usually the final estimate of a GST computation) to\n show information for (e.g. the types of its gates).\n\n params: dict\n A parameter dictionary to display\n \"\"\"\n\n def __init__(self, ws, model, params):\n \"\"\"\n Create a table of parameters and options from a `Results` object.\n\n Parameters\n ----------\n model : Model\n The model (usually the final estimate of a GST computation) to\n show information for (e.g. the types of its gates).\n\n params: dict\n A parameter dictionary to display\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(MetadataTable, self).__init__(ws, self._create, model, params)\n\n def _create(self, model, params_dict):\n\n colHeadings = ('Quantity', 'Value')\n formatters = ('Bold', 'Bold')\n\n #custom latex header for maximum width imposed on 2nd col\n latex_head = \"\\\\begin{tabular}[l]{|c|p{3in}|}\\n\\hline\\n\"\n latex_head += \"\\\\textbf{Quantity} & \\\\textbf{Value} \\\\\\\\ \\hline\\n\"\n table = _ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head})\n\n for key in sorted(list(params_dict.keys())):\n if key in ['L,germ tuple base string dict', 'weights', 'profiler']: continue # skip these\n if key == 'gaugeOptParams':\n if isinstance(params_dict[key], dict):\n val = params_dict[key].copy()\n if 'targetModel' in val:\n del val['targetModel'] # don't print this!\n\n elif isinstance(params_dict[key], list):\n val = []\n for go_param_dict in params_dict[key]:\n if isinstance(go_param_dict, dict): # to ensure .copy() exists\n val.append(go_param_dict.copy())\n if 'targetModel' in val[-1]:\n del val[-1]['targetModel'] # don't print this!\n else:\n val = params_dict[key]\n table.add_row((key, str(val)), (None, 'Verbatim'))\n\n if isinstance(self, _models.ExplicitOpModel):\n for lbl, vec in model.preps.items():\n if isinstance(vec, _state.StaticState): paramTyp = \"static\"\n elif isinstance(vec, _state.FullState): paramTyp = \"full\"\n elif isinstance(vec, _state.TPState): paramTyp = \"TP\"\n else: paramTyp = \"unknown\" # pragma: no cover\n table.add_row((lbl + \" parameterization\", paramTyp), (None, 'Verbatim'))\n\n for povmlbl, povm in model.povms.items():\n if isinstance(povm, _povm.UnconstrainedPOVM): paramTyp = \"unconstrained\"\n elif isinstance(povm, _povm.TPPOVM): paramTyp = \"TP\"\n elif isinstance(povm, _povm.TensorProductPOVM): paramTyp = 
\"TensorProd\"\n else: paramTyp = \"unknown\" # pragma: no cover\n table.add_row((povmlbl + \" parameterization\", paramTyp), (None, 'Verbatim'))\n\n for lbl, vec in povm.items():\n if isinstance(vec, _povm.StaticPOVMEffect): paramTyp = \"static\"\n elif isinstance(vec, _povm.FullPOVMEffect): paramTyp = \"full\"\n elif isinstance(vec, _povm.ComplementPOVMEffect): paramTyp = \"Comp\"\n else: paramTyp = \"unknown\" # pragma: no cover\n table.add_row((\"> \" + lbl + \" parameterization\", paramTyp), (None, 'Verbatim'))\n\n for gl, gate in model.operations.items():\n if isinstance(gate, _op.StaticArbitraryOp): paramTyp = \"static\"\n elif isinstance(gate, _op.FullArbitraryOp): paramTyp = \"full\"\n elif isinstance(gate, _op.FullTPOp): paramTyp = \"TP\"\n elif isinstance(gate, _op.LinearlyParamArbitraryOp): paramTyp = \"linear\"\n elif isinstance(gate, _op.EigenvalueParamDenseOp): paramTyp = \"eigenvalue\"\n elif isinstance(gate, _op.ComposedOp): paramTyp = \"Composed\"\n else: paramTyp = \"unknown\" # pragma: no cover\n table.add_row((gl + \" parameterization\", paramTyp), (None, 'Verbatim'))\n\n table.finish()\n return table\n\n\nclass SoftwareEnvTable(WorkspaceTable):\n \"\"\"\n Table showing details about the current software environment.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n \"\"\"\n\n def __init__(self, ws):\n \"\"\"\n Create a table displaying the software environment relevant to pyGSTi.\n\n Returns\n -------\n ReportTable\n \"\"\"\n super(SoftwareEnvTable, self).__init__(ws, self._create)\n\n def _create(self):\n\n import platform\n\n def _get_package_version(module_name):\n \"\"\" Extract the current version of a python module \"\"\"\n if module_name == \"cvxopt\":\n #special case b/c cvxopt can be weird...\n try:\n mod = __import__(\"cvxopt.info\")\n return str(mod.info.version)\n except Exception: pass # try the normal way below\n\n try:\n mod = __import__(module_name)\n return str(mod.__version__)\n except ImportError: # pragma: no cover\n return \"missing\" # pragma: no cover\n except AttributeError: # pragma: no cover\n return \"ver?\" # pragma: no cover\n except Exception: # pragma: no cover\n return \"???\" # pragma: no cover\n\n colHeadings = ('Quantity', 'Value')\n formatters = ('Bold', 'Bold')\n\n #custom latex header for maximum width imposed on 2nd col\n latex_head = \"\\\\begin{tabular}[l]{|c|p{3in}|}\\n\\hline\\n\"\n latex_head += \"\\\\textbf{Quantity} & \\\\textbf{Value} \\\\\\\\ \\hline\\n\"\n table = _ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head})\n\n #Python package information\n from .._version import version as pygsti_version\n table.add_row((\"pyGSTi version\", str(pygsti_version)), (None, 'Verbatim'))\n\n packages = ['numpy', 'scipy', 'matplotlib', 'ply', 'cvxopt', 'cvxpy',\n 'nose', 'PIL', 'psutil']\n for pkg in packages:\n table.add_row((pkg, _get_package_version(pkg)), (None, 'Verbatim'))\n\n #Python information\n table.add_row((\"Python version\", str(platform.python_version())), (None, 'Verbatim'))\n table.add_row((\"Python type\", str(platform.python_implementation())), (None, 'Verbatim'))\n table.add_row((\"Python compiler\", str(platform.python_compiler())), (None, 'Verbatim'))\n table.add_row((\"Python build\", str(platform.python_build())), (None, 'Verbatim'))\n table.add_row((\"Python branch\", str(platform.python_branch())), (None, 'Verbatim'))\n table.add_row((\"Python revision\", str(platform.python_revision())), (None, 'Verbatim'))\n\n #Platform information\n (system, _, 
release, version, machine, processor) = platform.uname()\n table.add_row((\"Platform summary\", str(platform.platform())), (None, 'Verbatim'))\n table.add_row((\"System\", str(system)), (None, 'Verbatim'))\n table.add_row((\"Sys Release\", str(release)), (None, 'Verbatim'))\n table.add_row((\"Sys Version\", str(version)), (None, 'Verbatim'))\n table.add_row((\"Machine\", str(machine)), (None, 'Verbatim'))\n table.add_row((\"Processor\", str(processor)), (None, 'Verbatim'))\n\n table.finish()\n return table\n\n\nclass ProfilerTable(WorkspaceTable):\n \"\"\"\n Table of profiler timing information\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n profiler : Profiler\n The profiler object to extract timings from.\n\n sort_by : {\"time\", \"name\"}\n What the timer values should be sorted by.\n \"\"\"\n\n def __init__(self, ws, profiler, sort_by=\"time\"):\n \"\"\"\n Create a table of profiler timing information.\n\n Parameters\n ----------\n profiler : Profiler\n The profiler object to extract timings from.\n\n sort_by : {\"time\", \"name\"}\n What the timer values should be sorted by.\n \"\"\"\n super(ProfilerTable, self).__init__(ws, self._create, profiler, sort_by)\n\n def _create(self, profiler, sort_by):\n\n colHeadings = ('Label', 'Time (sec)')\n formatters = ('Bold', 'Bold')\n\n #custom latex header for maximum width imposed on 2nd col\n latex_head = \"\\\\begin{tabular}[l]{|c|p{3in}|}\\n\\hline\\n\"\n latex_head += \"\\\\textbf{Label} & \\\\textbf{Time} (sec) \\\\\\\\ \\hline\\n\"\n table = _ReportTable(colHeadings, formatters,\n custom_header={'latex': latex_head})\n\n if profiler is not None:\n if sort_by == \"name\":\n timerNames = sorted(list(profiler.timers.keys()))\n elif sort_by == \"time\":\n timerNames = sorted(list(profiler.timers.keys()),\n key=lambda x: -profiler.timers[x])\n else:\n raise ValueError(\"Invalid 'sort_by' argument: %s\" % sort_by)\n\n for nm in timerNames:\n table.add_row((nm, profiler.timers[nm]), (None, None))\n\n table.finish()\n return table\n\n\nclass WildcardBudgetTable(WorkspaceTable):\n \"\"\"\n Table of wildcard budget information.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n\n budget : WildcardBudget\n The wildcard budget object to extract timings from.\n \"\"\"\n\n def __init__(self, ws, budget):\n \"\"\"\n Create a table of wildcard budget information.\n\n Parameters\n ----------\n budget : WildcardBudget\n The wildcard budget object to extract timings from.\n \"\"\"\n super(WildcardBudgetTable, self).__init__(ws, self._create, budget)\n\n def _create(self, budget):\n\n colHeadings = ('Element', 'Description', 'Budget')\n formatters = ('Bold', 'Bold', 'Bold')\n\n #custom latex header for maximum width imposed on 2nd col\n table = _ReportTable(colHeadings, formatters)\n\n if budget is not None:\n for nm, (desc, val) in budget.description.items():\n table.add_row((nm, desc, val), (None, None, None))\n\n table.finish()\n return table\n\n\nclass ExampleTable(WorkspaceTable):\n \"\"\"\n Table used just as an example of what tables can do/look like for use within the \"Help\" section of reports.\n\n Parameters\n ----------\n ws : Workspace\n The containing (parent) workspace.\n \"\"\"\n\n def __init__(self, ws):\n \"\"\"A table showing how to use table features.\"\"\"\n super(ExampleTable, self).__init__(ws, self._create)\n\n def _create(self):\n colHeadings = [\"Hover over me...\", \"And me!\", \"Click the pig\"]\n tooltips = [\"This tooltip can give more information about what 
this column displays\",\n \"Unfortunately, we can't show nicely formatted math in these tooltips (yet)\",\n \"Click on the pyGSTi logo below to create the non-automatically-generated plot; \"\n \"then hover over the colored boxes.\"]\n example_mx = _np.array([[1.0, 1 / 3, -1 / 3, -1.0],\n [1 / 3, 1.0, 0.0, -1 / 5],\n [-1 / 3, 0.0, 1.0, 1 / 6],\n [-1.0, -1 / 5, 1 / 6, 1.0]])\n example_ebmx = _np.abs(example_mx) * 0.05\n example_fig = _wp.GateMatrixPlot(self.ws, example_mx, -1.0, 1.0,\n \"pp\", eb_matrix=example_ebmx)\n\n table = _ReportTable(colHeadings, None, col_heading_labels=tooltips)\n table.add_row((\"Pi\", _np.pi, example_fig), ('Normal', 'Normal', 'Figure'))\n table.finish()\n return table\n" ]
[ [ "numpy.dot", "numpy.log2", "numpy.sqrt", "numpy.allclose", "numpy.arange", "numpy.kron", "numpy.linalg.norm", "scipy.linalg.expm", "numpy.real", "numpy.identity", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.log2", "numpy.sqrt", "numpy.abs", "numpy.allclose", "numpy.linalg.inv", "numpy.isnan", "numpy.linalg.norm", "numpy.identity", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OmarJabri7/SAIA
[ "54dfee4684dbfd5bf6cb58cc3974abc051022022" ]
[ "data/get_data.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom nltk.corpus import words\nimport nltk\nimport re\nimport string\nfrom data_processing import DisasterProcessor\n\nX = pd.read_csv(\"emotion_data/tweet_emotions.csv\")\n\nstop_wrds = nltk.corpus.stopwords.words(\"english\")\ncolumns = X.columns\ncolumns = [\"content\"]\npreprocessor = DisasterProcessor()\neng_words = set(words.words())\nfor column in columns:\n X[column] = X[column].apply(\n lambda x: ' '.join([re.sub(\"[$@&#]\",\"\",w) for w in x.lower().split(\" \") if w]))\n table = str.maketrans('', '', string.punctuation)\n X[column] = X[column].apply(\n lambda x: ' '.join([w.translate(table) for w in x.split(\" \") if w.isalpha()]))\n X[column] = X[column].apply(\n lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds))\n X[column] = X[column].apply(\n lambda x: ' '.join([w for w in x.split(\" \") if len(w) >= 2]))\n\nX[\"content\"] = X[\"content\"].apply(\n lambda x: ' '.join(([w for w in x.split(\" \") if w in eng_words]))\n)\nunique_words = list(X['content'].str.split(' ', expand=True).stack().unique())\n# X.Sentence = X.Sentence.apply(lambda x: x if len(x) > 2 else np.nan)\n\n# X[\"clean_content\"] = X[\"content\"].str.replace('[#,@,&,=,[,http://]', '')\n\nprint(np.unique(X[\"sentiment\"]))\n\nX = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger',\n 'surprise', 'worry'])]\n\n# X = X[\"sentiment\" in ['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger']]\n\nX = X[['sentiment','content']]\n\n# happy = X.loc[X['sentiment'].isin(['happiness','fun','enthusiasm','relief']), 'content'].values\n\nhappy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values\n\nlove = X.loc[X['sentiment'].isin(['love']),'content'].values\n\n# sadness = X.loc[X['sentiment'].isin(['sadness','worry']), 'content'].values\n\nsadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values\n\n# angry = X.loc[X['sentiment'].isin(['hate','anger']), 'content'].values\n\nangry = X.loc[X['sentiment'].isin(['anger']), 'content'].values\n\nsurprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values\n\nfear = X.loc[X['sentiment'].isin(['fear']),'content'].values\n\n# emotions = dict(Emotion = ['happy','love','sadness','angry','surprise','fear'])\n# data = {\"Sentence\" : [happy, love, sadness, angry, surprise, fear],\n# \"Emotion\" : ['joy','love','sadness','anger','surprise','fear'],}\n#\ndata = {\"Sentence\" : [sadness, angry, fear],\n \"Emotion\" : ['sadness','anger','fear'],}\n\nnew_df = pd.DataFrame(data)\n\nnew_df = new_df.explode('Sentence', ignore_index=True)\n\nnew_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
AustinHellerRepo/GameManager
[ "2eee8e821f551b4683e59ea8cde7e61c26cf8878" ]
[ "test/latency_position_test.py" ]
[ "from __future__ import annotations\nimport unittest\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom typing import List, Tuple, Dict, Set, Callable, Type\n\n\nclass Dot():\n\n\tdef __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]):\n\n\t\tself.__position = position\n\t\tself.__velocity = velocity\n\t\tself.__acceleration = acceleration\n\t\tself.__time_index_offset = 0\n\n\t\tself.__acceleration_delta = None # type: Tuple[float, float]\n\t\tself.__acceleration_delta_end_time_index = None # type: float\n\t\tself.__acceleration_delta_end_time_index_acceleration = None # type: Tuple[float, float]\n\n\tdef set_positiion(self, *, position: Tuple[float, float]):\n\t\tself.__position = position\n\n\tdef set_velocity(self, *, velocity: Tuple[float, float]):\n\t\tself.__velocity = velocity\n\n\tdef set_acceleration(self, *, acceleration: Tuple[float, float]):\n\t\tself.__acceleration = acceleration\n\n\tdef get_position(self, *, time_index: float) -> Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tposition = list(self.__position)\n\t\tfor dimension_index in range(len(position)):\n\t\t\tposition[dimension_index] += self.__velocity[dimension_index] * calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 3) / 6.0\n\t\t\t\telse:\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) ** 2) / 2.0\n\t\t\t\t\tposition[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index ** 3) / 6.0\n\t\treturn tuple(position)\n\n\tdef get_velocity(self, *, time_index: float) -> Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tvelocity = list(self.__velocity)\n\t\tfor dimension_index in range(len(velocity)):\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index\n\t\t\t\t\tvelocity[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index**2) / 2.0\n\t\t\t\telse:\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index\n\t\t\t\t\tvelocity[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index)\n\t\t\t\t\tvelocity[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index**2) / 2.0\n\t\treturn tuple(velocity)\n\n\tdef get_acceleration(self, *, time_index: float) -> 
Tuple[float, float]:\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tacceleration = [0] * len(self.__position)\n\t\tfor dimension_index in range(len(acceleration)):\n\t\t\tif self.__acceleration_delta_end_time_index is None:\n\t\t\t\tacceleration[dimension_index] += self.__acceleration[dimension_index]\n\t\t\telse:\n\t\t\t\tif calculated_time_index < self.__acceleration_delta_end_time_index:\n\t\t\t\t\tacceleration[dimension_index] += self.__acceleration[dimension_index]\n\t\t\t\t\tacceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index)\n\t\t\t\telse:\n\t\t\t\t\tacceleration[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index]\n\t\t\t\t\tacceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index)\n\t\treturn tuple(self.__acceleration)\n\n\tdef bounce(self, *, time_index: float):\n\t\tbounce_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tbounce_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tbounce_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = bounce_position\n\t\tself.__velocity = (bounce_velocity[0], -bounce_velocity[1])\n\t\tself.__acceleration = bounce_acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef reflect(self, *, time_index: float):\n\t\treflect_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\treflect_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\treflect_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = reflect_position\n\t\tself.__velocity = (-reflect_velocity[0], reflect_velocity[1])\n\t\tself.__acceleration = reflect_acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float):\n\t\tself.__position = position\n\t\tself.__velocity = velocity\n\t\tself.__acceleration = acceleration\n\t\tcalculated_time_index = time_index + self.__time_index_offset\n\t\tif self.__acceleration_delta_end_time_index is not None:\n\t\t\tself.__acceleration_delta_end_time_index -= calculated_time_index\n\t\t\tif self.__acceleration_delta_end_time_index <= 0:\n\t\t\t\tself.__acceleration_delta = None\n\t\t\t\tself.__acceleration_delta_end_time_index = None\n\t\t\t\tself.__acceleration_delta_end_time_index_acceleration = None\n\t\tself.__time_index_offset = -time_index\n\n\tdef set_acceleration_delta(self, *, time_index: float, acceleration_delta: 
Tuple[float, float], end_time_index: float):\n\t\ttime_index_position = self.get_position(\n\t\t\ttime_index=time_index\n\t\t)\n\t\ttime_index_velocity = self.get_velocity(\n\t\t\ttime_index=time_index\n\t\t)\n\t\ttime_index_acceleration = self.get_acceleration(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__position = time_index_position\n\t\tself.__velocity = time_index_velocity\n\t\tself.__acceleration = time_index_acceleration\n\t\tself.__time_index_offset = -time_index\n\t\tself.__acceleration_delta = acceleration_delta\n\t\tself.__acceleration_delta_end_time_index = end_time_index\n\t\tself.__acceleration_delta_end_time_index_acceleration = time_index_acceleration\n\n\tdef merge(self, *, dot: Dot, current_time_index: float, merge_time_index_offset: float):\n\t\tself_position = self.get_position(\n\t\t\ttime_index=current_time_index\n\t\t)\n\t\tself_velocity = self.get_velocity(\n\t\t\ttime_index=current_time_index\n\t\t)\n\t\tdestination_position = dot.get_position(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\t\tdestination_velocity = dot.get_velocity(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\t\tdestination_acceleration = dot.get_acceleration(\n\t\t\ttime_index=current_time_index + merge_time_index_offset\n\t\t)\n\n\t\tacceleration_delta = []\n\t\tacceleration = []\n\t\tfor dimension_index in range(len(self.__position)):\n\t\t\ttemp_acceleration_delta = (-12 * destination_position[dimension_index] + 6 * destination_velocity[dimension_index] * merge_time_index_offset + 12 * self_position[dimension_index] + 6 * self_velocity[dimension_index] * merge_time_index_offset) / (merge_time_index_offset**3)\n\t\t\ttemp_acceleration = (destination_velocity[dimension_index] - self_velocity[dimension_index]) / merge_time_index_offset - 0.5 * temp_acceleration_delta * merge_time_index_offset\n\t\t\tacceleration_delta.append(temp_acceleration_delta)\n\t\t\tacceleration.append(temp_acceleration)\n\n\t\tself.__position = self_position\n\t\tself.__velocity = self_velocity\n\t\tself.__acceleration = tuple(acceleration)\n\t\tself.__acceleration_delta = tuple(acceleration_delta)\n\t\tself.__acceleration_delta_end_time_index = merge_time_index_offset\n\t\tself.__acceleration_delta_end_time_index_acceleration = destination_acceleration\n\t\tself.__time_index_offset = -current_time_index\n\n\nclass DotPlotter():\n\n\tdef __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]):\n\n\t\tself.__minimum_position = minimum_position\n\t\tself.__maximum_position = maximum_position\n\n\t\tself.__dots = [] # type: List[Dot]\n\n\t\tself.__x = []\n\t\tself.__y = []\n\t\tself.__figure = None\n\t\tself.__scatter = None\n\n\tdef add_dot(self, *, dot: Dot):\n\t\tself.__dots.append(dot)\n\n\tdef __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]:\n\t\tscatter = ([], [])\n\t\tfor dot in self.__dots:\n\t\t\tposition = dot.get_position(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\n\t\t\tif position[1] < self.__minimum_position[1]:\n\t\t\t\tdot.bounce(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\tif position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]:\n\t\t\t\tdot.reflect(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\n\t\t\tscatter[0].append(position[0])\n\t\t\tscatter[1].append(position[1])\n\n\t\t\tprint(f\"position: {position}\")\n\n\t\treturn scatter\n\n\tdef show(self):\n\t\tplt.ion()\n\t\tself.__figure, ax = plt.subplots()\n\t\tself.__scatter = ax.scatter(self.__x, 
self.__y, facecolors=\"none\", edgecolors=[\"black\", \"red\"], s=10)\n\t\tplt.xlim(self.__minimum_position[0], self.__maximum_position[0])\n\t\tplt.ylim(self.__minimum_position[1], self.__maximum_position[1])\n\t\tplt.draw()\n\n\tdef refresh(self, *, time_index: float):\n\t\tx, y = self.__get_scatter(\n\t\t\ttime_index=time_index\n\t\t)\n\t\tself.__x.clear()\n\t\tself.__x.extend(x)\n\t\tself.__y.clear()\n\t\tself.__y.extend(y)\n\t\tself.__scatter.set_offsets(np.c_[self.__x, self.__y])\n\t\tself.__figure.canvas.draw_idle()\n\t\tplt.pause(0.01)\n\n\nclass LatencyPositionTest(unittest.TestCase):\n\n\tdef test_initialize(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tself.assertIsNotNone(dot_plotter)\n\n\tdef test_move_dot_along_path(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\twhile time_index < 20.0:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_in_separate_windows(self):\n\n\t\tdot_plotters_total = 2\n\t\tdot_plotters = []\n\n\t\tfor dot_plotter_index in range(dot_plotters_total):\n\t\t\tdot_plotter = DotPlotter(\n\t\t\t\tminimum_position=(0, 0),\n\t\t\t\tmaximum_position=(10, 10)\n\t\t\t)\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdot_plotter.show()\n\n\t\t\tdot_plotters.append(dot_plotter)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\twhile time_index < 10.0:\n\t\t\tfor dot_plotter in dot_plotters:\n\t\t\t\tdot_plotter.refresh(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_then_alter_state(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dot\n\t\t\tdot.set_state(\n\t\t\t\tposition=dot.get_position(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t),\n\t\t\t\tvelocity=(-1, 1),\n\t\t\t\tacceleration=(0, -1),\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 20.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > maximum_time_index / 2.0:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_dot_along_path_then_set_acceleration_delta(self):\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tdot = Dot(\n\t\t\tposition=(1, 9),\n\t\t\tvelocity=(1, 0),\n\t\t\tacceleration=(0, -1)\n\t\t)\n\n\t\tdef 
alter_dot(*, time_index: float):\n\t\t\tnonlocal dot\n\t\t\tdot.set_acceleration_delta(\n\t\t\t\ttime_index=time_index,\n\t\t\t\tacceleration_delta=(0, 0.5),\n\t\t\t\tend_time_index=5.0\n\t\t\t)\n\n\t\tdot_plotter.add_dot(\n\t\t\tdot=dot\n\t\t)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(dot_index + 1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 20.0\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tdots[0].set_acceleration_delta(\n\t\t\t\ttime_index=time_index,\n\t\t\t\tacceleration_delta=(0, 0.5),\n\t\t\t\tend_time_index=5.0\n\t\t\t)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.05\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tis_altered = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\n\t\tplt.waitforbuttonpress()\n\n\tdef test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self):\n\n\t\tdots_total = 2\n\t\tdots = []\n\n\t\tdot_plotter = DotPlotter(\n\t\t\tminimum_position=(0, 0),\n\t\t\tmaximum_position=(10, 10)\n\t\t)\n\n\t\tfor dot_index in range(dots_total):\n\t\t\tdot = Dot(\n\t\t\t\tposition=(1, 9),\n\t\t\t\tvelocity=(1, 0),\n\t\t\t\tacceleration=(0, -1)\n\t\t\t)\n\n\t\t\tdot_plotter.add_dot(\n\t\t\t\tdot=dot\n\t\t\t)\n\n\t\t\tdots.append(dot)\n\n\t\tdot_plotter.show()\n\t\tdef alter_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tif False:\n\t\t\t\tdots[0].set_acceleration_delta(\n\t\t\t\t\ttime_index=time_index,\n\t\t\t\t\tacceleration_delta=(0, 
0.5),\n\t\t\t\t\tend_time_index=1.0\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tdots[0].set_velocity(\n\t\t\t\t\tvelocity=(-1, 1)\n\t\t\t\t)\n\n\t\tdef merge_dot(*, time_index: float):\n\t\t\tnonlocal dots\n\t\t\tdots[1].merge(\n\t\t\t\tdot=dots[0],\n\t\t\t\tcurrent_time_index=time_index,\n\t\t\t\tmerge_time_index_offset=1.0\n\t\t\t)\n\n\t\tprint(f\"refreshing\")\n\n\t\ttime_index = 0.0\n\t\ttime_index_delta = 0.01\n\t\tmaximum_time_index = 30.0\n\t\talter_time_index = 10.0\n\t\tmerge_time_index = 11.0\n\t\tis_altered = False\n\t\tis_merged = False\n\t\twhile time_index < maximum_time_index:\n\t\t\tdot_plotter.refresh(\n\t\t\t\ttime_index=time_index\n\t\t\t)\n\t\t\ttime_index += time_index_delta\n\n\t\t\tif not is_altered and time_index > alter_time_index:\n\t\t\t\talter_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_altered = True\n\t\t\tif not is_merged and time_index > merge_time_index:\n\t\t\t\tmerge_dot(\n\t\t\t\t\ttime_index=time_index\n\t\t\t\t)\n\t\t\t\tis_merged = True\n\n\t\tplt.waitforbuttonpress()\n" ]
[ [ "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.draw", "matplotlib.pyplot.xlim", "matplotlib.pyplot.waitforbuttonpress", "matplotlib.pyplot.ion", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luisangel86a/tensorflow
[ "77ee5e02721ba797fe01d47019e6017d2bb09ab7" ]
[ "tensorflow/python/keras/optimizer_v2/utils.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Optimizer utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.distribute import central_storage_strategy\nfrom tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.distribute import reduce_util as ds_reduce_util\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\ndef all_reduce_sum_gradients(grads_and_vars):\n \"\"\"Returns all-reduced gradients aggregated via summation.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n\n Returns:\n A list of all-reduced gradients.\n \"\"\"\n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n # We switch to a cross-replica context since there is a bug which causes\n # IndexedSlices to be converted to dense tensors when all-reduced in a\n # replica context.\n # TODO(b/150507409): Do not switch to a cross-replica context once the bug\n # is fixed.\n if filtered_grads_and_vars:\n reduced = distribute_ctx.get_replica_context().merge_call(\n _all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n else:\n reduced = []\n # Copy 'reduced' but add None gradients back in\n reduced_with_nones = []\n reduced_pos = 0\n for g, _ in grads_and_vars:\n if g is None:\n reduced_with_nones.append(None)\n else:\n reduced_with_nones.append(reduced[reduced_pos])\n reduced_pos += 1\n assert reduced_pos == len(reduced), \"Failed to add all gradients\"\n return reduced_with_nones\n\n\ndef make_gradient_clipnorm_fn(clipnorm):\n \"\"\"Creates a gradient transformation function for clipping by norm.\"\"\"\n\n def gradient_clipnorm_fn(grads_and_vars):\n\n if isinstance(distribute_ctx.get_strategy(),\n central_storage_strategy.CentralStorageStrategy):\n raise ValueError(\n \"`clipnorm` is not supported with `CenteralStorageStrategy`\")\n\n clipped_grads_and_vars = [\n (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars\n ]\n return clipped_grads_and_vars\n\n return gradient_clipnorm_fn\n\n\ndef make_gradient_clipvalue_fn(clipvalue):\n \"\"\"Creates a gradient transformation function for clipping by value.\"\"\"\n\n def gradient_clipvalue_fn(grads_and_vars):\n\n if isinstance(distribute_ctx.get_strategy(),\n central_storage_strategy.CentralStorageStrategy):\n raise ValueError(\n \"`clipvalue` is not supported with `CenteralStorageStrategy`\")\n\n clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue,\n clipvalue), v)\n for g, v in grads_and_vars]\n return clipped_grads_and_vars\n\n return gradient_clipvalue_fn\n\n\ndef filter_empty_gradients(grads_and_vars):\n \"\"\"Filter out `(grad, var)` pairs that have a gradient equal to `None`.\"\"\"\n grads_and_vars = 
tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([v.name for _, v in grads_and_vars],))\n if vars_with_empty_grads:\n logging.warning(\n (\"Gradients do not exist for variables %s when minimizing the loss.\"),\n ([v.name for v in vars_with_empty_grads]))\n return filtered\n\n\ndef _all_reduce_sum_fn(distribution, grads_and_vars):\n return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM,\n grads_and_vars)\n" ]
[ [ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.ops.clip_ops.clip_by_norm", "tensorflow.python.ops.clip_ops.clip_by_value", "tensorflow.python.distribute.distribution_strategy_context.get_replica_context" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
qgallouedec/stable-baselines3
[ "a6f5049a99a4c21a6f0bcce458ca3306cef310e0" ]
[ "stable_baselines3/common/vec_env/subproc_vec_env.py" ]
[ "import multiprocessing as mp\nfrom collections import OrderedDict\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union\n\nimport gym\nimport numpy as np\n\nfrom stable_baselines3.common.vec_env.base_vec_env import (\n CloudpickleWrapper,\n VecEnv,\n VecEnvIndices,\n VecEnvObs,\n VecEnvStepReturn,\n)\n\n\ndef _worker(\n remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper\n) -> None:\n # Import here to avoid a circular import\n from stable_baselines3.common.env_util import is_wrapped\n\n parent_remote.close()\n env = env_fn_wrapper.var()\n while True:\n try:\n cmd, data = remote.recv()\n if cmd == \"step\":\n observation, reward, done, info = env.step(data)\n if done:\n # save final observation where user can get it, then reset\n info[\"terminal_observation\"] = observation\n observation = env.reset()\n remote.send((observation, reward, done, info))\n elif cmd == \"seed\":\n remote.send(env.seed(data))\n elif cmd == \"reset\":\n observation = env.reset()\n remote.send(observation)\n elif cmd == \"render\":\n remote.send(env.render(data))\n elif cmd == \"close\":\n env.close()\n remote.close()\n break\n elif cmd == \"get_spaces\":\n remote.send((env.observation_space, env.action_space))\n elif cmd == \"env_method\":\n method = getattr(env, data[0])\n remote.send(method(*data[1], **data[2]))\n elif cmd == \"get_attr\":\n remote.send(getattr(env, data))\n elif cmd == \"set_attr\":\n remote.send(setattr(env, data[0], data[1]))\n elif cmd == \"is_wrapped\":\n remote.send(is_wrapped(env, data))\n else:\n raise NotImplementedError(f\"`{cmd}` is not implemented in the worker\")\n except EOFError:\n break\n\n\nclass SubprocVecEnv(VecEnv):\n \"\"\"\n Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own\n process, allowing significant speed up when the environment is computationally complex.\n\n For performance reasons, if your environment is not IO bound, the number of environments should not exceed the\n number of logical cores on your CPU.\n\n .. warning::\n\n Only 'forkserver' and 'spawn' start methods are thread-safe,\n which is important when TensorFlow sessions or other non thread-safe\n libraries are used in the parent (see issue #217). However, compared to\n 'fork' they incur a small start-up cost and have restrictions on\n global variables. 
With those methods, users must wrap the code in an\n ``if __name__ == \"__main__\":`` block.\n For more information, see the multiprocessing documentation.\n\n :param env_fns: Environments to run in subprocesses\n :param start_method: method used to start the subprocesses.\n Must be one of the methods returned by multiprocessing.get_all_start_methods().\n Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.\n \"\"\"\n\n def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):\n self.waiting = False\n self.closed = False\n n_envs = len(env_fns)\n\n if start_method is None:\n # Fork is not a thread safe method (see issue #217)\n # but is more user friendly (does not require to wrap the code in\n # a `if __name__ == \"__main__\":`)\n forkserver_available = \"forkserver\" in mp.get_all_start_methods()\n start_method = \"forkserver\" if forkserver_available else \"spawn\"\n ctx = mp.get_context(start_method)\n\n self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])\n self.processes = []\n for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):\n args = (work_remote, remote, CloudpickleWrapper(env_fn))\n # daemon=True: if the main process crashes, we should not cause things to hang\n process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error\n process.start()\n self.processes.append(process)\n work_remote.close()\n\n self.remotes[0].send((\"get_spaces\", None))\n observation_space, action_space = self.remotes[0].recv()\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n\n def step_async(self, actions: np.ndarray) -> None:\n for remote, action in zip(self.remotes, actions):\n remote.send((\"step\", action))\n self.waiting = True\n\n def step_wait(self) -> VecEnvStepReturn:\n results = [remote.recv() for remote in self.remotes]\n self.waiting = False\n obs, rews, dones, infos = zip(*results)\n return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos\n\n def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:\n if seed is None:\n seed = np.random.randint(0, 2**32 - 1)\n for idx, remote in enumerate(self.remotes):\n remote.send((\"seed\", seed + idx))\n return [remote.recv() for remote in self.remotes]\n\n def reset(self) -> VecEnvObs:\n for remote in self.remotes:\n remote.send((\"reset\", None))\n obs = [remote.recv() for remote in self.remotes]\n return _flatten_obs(obs, self.observation_space)\n\n def close(self) -> None:\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send((\"close\", None))\n for process in self.processes:\n process.join()\n self.closed = True\n\n def get_images(self) -> Sequence[np.ndarray]:\n for pipe in self.remotes:\n # gather images from subprocesses\n # `mode` will be taken into account later\n pipe.send((\"render\", \"rgb_array\"))\n imgs = [pipe.recv() for pipe in self.remotes]\n return imgs\n\n def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:\n \"\"\"Return attribute from vectorized environment (see base class).\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"get_attr\", attr_name))\n return [remote.recv() for remote in target_remotes]\n\n def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n \"\"\"Set attribute inside vectorized environments (see base 
class).\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"set_attr\", (attr_name, value)))\n for remote in target_remotes:\n remote.recv()\n\n def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:\n \"\"\"Call instance methods of vectorized environments.\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"env_method\", (method_name, method_args, method_kwargs)))\n return [remote.recv() for remote in target_remotes]\n\n def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:\n \"\"\"Check if worker environments are wrapped with a given wrapper\"\"\"\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send((\"is_wrapped\", wrapper_class))\n return [remote.recv() for remote in target_remotes]\n\n def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:\n \"\"\"\n Get the connection object needed to communicate with the wanted\n envs that are in subprocesses.\n\n :param indices: refers to indices of envs.\n :return: Connection object to communicate between processes.\n \"\"\"\n indices = self._get_indices(indices)\n return [self.remotes[i] for i in indices]\n\n\ndef _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs:\n \"\"\"\n Flatten observations, depending on the observation space.\n\n :param obs: observations.\n A list or tuple of observations, one per environment.\n Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.\n :return: flattened observations.\n A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.\n Each NumPy array has the environment index as its first axis.\n \"\"\"\n assert isinstance(obs, (list, tuple)), \"expected list or tuple of observations per environment\"\n assert len(obs) > 0, \"need observations from at least one environment\"\n\n if isinstance(space, gym.spaces.Dict):\n assert isinstance(space.spaces, OrderedDict), \"Dict space must have ordered subspaces\"\n assert isinstance(obs[0], dict), \"non-dict observation for environment with Dict observation space\"\n return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])\n elif isinstance(space, gym.spaces.Tuple):\n assert isinstance(obs[0], tuple), \"non-tuple observation for environment with Tuple observation space\"\n obs_len = len(space.spaces)\n return tuple(np.stack([o[i] for o in obs]) for i in range(obs_len))\n else:\n return np.stack(obs)\n" ]
[ [ "numpy.stack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rartino/httk-rsttools
[ "57c46362899105a72b3b6efc45b50bcda8e574a7" ]
[ "rstslide/plugins/Matplotlib/XKCDify.py" ]
[ "\"\"\"\nXKCD plot generator\n-------------------\nAuthor: Jake Vanderplas\n\nThis is a script that will take any matplotlib line diagram, and convert it\nto an XKCD-style plot. It will work for plots with line & text elements,\nincluding axes labels and titles (but not axes tick labels).\n\nThe idea for this comes from work by Damon McDougall\n http://www.mail-archive.com/[email protected]/msg25499.html\n\"\"\"\nimport os\n\nimport numpy as np\nimport pylab as pl\nfrom scipy import interpolate, signal\nimport matplotlib.font_manager as fm\n\nscript_path = os.path.dirname(os.path.abspath(__file__))\n\n# We need a special font for the code below. It can be downloaded this way:\n#import os\n#import urllib2\n#if not os.path.exists('Humor-Sans.ttf'):\n# print 'Downloading the font Humor-sans.'\n# fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans.ttf')\n# open('Humor-Sans.ttf', 'wb').write(fhandle.read())\n\n\ndef xkcd_line(x, y, xlim=None, ylim=None,\n mag=1.0, f1=30, f2=0.05, f3=15):\n \"\"\"\n Mimic a hand-drawn line from (x, y) data\n\n Parameters\n ----------\n x, y : array_like\n arrays to be modified\n xlim, ylim : data range\n the assumed plot range for the modification. If not specified,\n they will be guessed from the data\n mag : float\n magnitude of distortions\n f1, f2, f3 : int, float, int\n filtering parameters. f1 gives the size of the window, f2 gives\n the high-frequency cutoff, f3 gives the size of the filter\n\n Returns\n -------\n x, y : ndarrays\n The modified lines\n \"\"\"\n x = np.asarray(x)\n y = np.asarray(y)\n\n # get limits for rescaling\n if xlim is None:\n xlim = (x.min(), x.max())\n if ylim is None:\n ylim = (y.min(), y.max())\n\n if xlim[1] == xlim[0]:\n xlim = ylim\n\n if ylim[1] == ylim[0]:\n ylim = xlim\n\n # scale the data\n x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])\n y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])\n\n # compute the total distance along the path\n dx = x_scaled[1:] - x_scaled[:-1]\n dy = y_scaled[1:] - y_scaled[:-1]\n dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))\n\n # number of interpolated points is proportional to the distance\n Nu = int(200 * dist_tot)\n u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)\n\n # interpolate curve at sampled points\n k = min(3, len(x) - 1)\n res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)\n x_int, y_int = interpolate.splev(u, res[0])\n\n # we'll perturb perpendicular to the drawn line\n dx = x_int[2:] - x_int[:-2]\n dy = y_int[2:] - y_int[:-2]\n dist = np.sqrt(dx * dx + dy * dy)\n\n # create a filtered perturbation\n coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)\n b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))\n response = signal.lfilter(b, 1, coeffs)\n\n x_int[1:-1] += response * dy / dist\n y_int[1:-1] += response * dx / dist\n\n # un-scale data\n x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]\n y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]\n\n return x_int, y_int\n\n\ndef XKCDify(ax, mag=1.0,\n f1=50, f2=0.01, f3=15,\n forecolor='k',\n bgcolor='w',\n xaxis_loc=None,\n yaxis_loc=None,\n xaxis_arrow='+',\n yaxis_arrow='+',\n ax_extend=0.1,\n expand_axes=False):\n \"\"\"Make axis look hand-drawn\n\n This adjusts all lines, text, legends, and axes in the figure to look\n like xkcd plots. Other plot elements are not modified.\n\n Parameters\n ----------\n ax : Axes instance\n the axes to be modified.\n mag : float\n the magnitude of the distortion\n f1, f2, f3 : int, float, int\n filtering parameters. 
f1 gives the size of the window, f2 gives\n the high-frequency cutoff, f3 gives the size of the filter\n xaxis_loc, yaxis_log : float\n The locations to draw the x and y axes. If not specified, they\n will be drawn from the bottom left of the plot\n xaxis_arrow, yaxis_arrow : str\n where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''\n ax_extend : float\n How far (fractionally) to extend the drawn axes beyond the original\n axes limits\n expand_axes : bool\n if True, then expand axes to fill the figure (useful if there is only\n a single axes in the figure)\n \"\"\"\n # Get axes aspect\n ext = ax.get_window_extent().extents\n aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])\n\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n\n xspan = xlim[1] - xlim[0]\n yspan = ylim[1] - xlim[0]\n\n xax_lim = (xlim[0] - ax_extend * xspan,\n xlim[1] + ax_extend * xspan)\n yax_lim = (ylim[0] - ax_extend * yspan,\n ylim[1] + ax_extend * yspan)\n\n if xaxis_loc is None:\n xaxis_loc = ylim[0]\n\n if yaxis_loc is None:\n yaxis_loc = xlim[0]\n\n # Draw axes\n xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],\n linestyle='-', color=forecolor)\n yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],\n linestyle='-', color=forecolor)\n\n # Label axes3, 0.5, 'hello', fontsize=14)\n ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(),\n fontsize=14, ha='right', va='top', rotation=5)\n ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(),\n fontsize=14, ha='right', va='top', rotation=85)\n ax.set_xlabel('')\n ax.set_ylabel('')\n\n # Add title\n ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],\n ax.get_title(),\n ha='center', va='bottom', fontsize=16)\n ax.set_title('')\n\n Nlines = len(ax.lines)\n lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]\n\n for line in lines:\n x, y = line.get_data()\n\n x_int, y_int = xkcd_line(x, y, xlim, ylim,\n mag, f1, f2, f3)\n\n # create foreground and background line\n lw = line.get_linewidth()\n line.set_linewidth(2 * lw)\n line.set_data(x_int, y_int)\n\n# # don't add background line for axes\n# if (line is not xaxis) and (line is not yaxis):\n# line_bg = pl.Line2D(x_int, y_int, color=bgcolor,\n# linewidth=8 * lw)\n# ax.add_line(line_bg)\n\n ax.add_line(line)\n\n # Draw arrow-heads at the end of axes lines\n arr1 = 0.03 * np.array([-1, 0, -1])\n arr2 = 0.02 * np.array([-1, 0, 1])\n\n arr1[::2] += np.random.normal(0, 0.005, 2)\n arr2[::2] += np.random.normal(0, 0.005, 2)\n\n x, y = xaxis.get_data()\n if '+' in str(xaxis_arrow):\n ax.plot(x[-1] + arr1 * xspan * aspect,\n y[-1] + arr2 * yspan,\n color=forecolor, lw=2)\n if '-' in str(xaxis_arrow):\n ax.plot(x[0] - arr1 * xspan * aspect,\n y[0] - arr2 * yspan,\n color=forecolor, lw=2)\n\n x, y = yaxis.get_data()\n if '+' in str(yaxis_arrow):\n ax.plot(x[-1] + arr2 * xspan * aspect,\n y[-1] + arr1 * yspan,\n color=forecolor, lw=2)\n if '-' in str(yaxis_arrow):\n ax.plot(x[0] - arr2 * xspan * aspect,\n y[0] - arr1 * yspan,\n color=forecolor, lw=2)\n\n # Change all the fonts to humor-sans.\n prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16)\n for text in ax.texts:\n text.set_fontproperties(prop)\n\n # modify legend\n leg = ax.get_legend()\n if leg is not None:\n leg.set_frame_on(False)\n\n for child in leg.get_children():\n if isinstance(child, pl.Line2D):\n x, y = child.get_data()\n child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001))\n child.set_linewidth(2 * child.get_linewidth())\n if isinstance(child, 
pl.Text):\n child.set_fontproperties(prop)\n\n # Set the axis limits\n ax.set_xlim(xax_lim[0] - 0.1 * xspan,\n xax_lim[1] + 0.1 * xspan)\n ax.set_ylim(yax_lim[0] - 0.1 * yspan,\n yax_lim[1] + 0.1 * yspan)\n\n # adjust the axes\n ax.set_xticks([])\n ax.set_yticks([])\n\n if expand_axes:\n ax.figure.set_facecolor(bgcolor)\n ax.set_axis_off()\n ax.set_position([0, 0, 1, 1])\n\n return ax\n" ]
[ [ "numpy.sqrt", "numpy.asarray", "numpy.arange", "scipy.interpolate.splprep", "scipy.interpolate.splev", "numpy.random.normal", "scipy.signal.lfilter", "numpy.array", "scipy.signal.firwin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
bubbliiiing/yolox-tf2
[ "0407c77858d436a6b370e591eea7963cc807f3b4" ]
[ "utils/utils_fit.py" ]
[ "import os\r\n\r\nimport tensorflow as tf\r\nfrom nets.yolo import get_yolo_loss\r\nfrom tqdm import tqdm\r\n\r\n\r\n#------------------------------#\r\n# 防止bug\r\n#------------------------------#\r\ndef get_train_step_fn(strategy):\r\n @tf.function\r\n def train_step(imgs, targets, net, yolo_loss, optimizer):\r\n with tf.GradientTape() as tape:\r\n #------------------------------#\r\n # 计算loss\r\n #------------------------------#\r\n P5_output, P4_output, P3_output = net(imgs, training=True)\r\n args = [P5_output, P4_output, P3_output] + [targets]\r\n \r\n loss_value = yolo_loss(args)\r\n #------------------------------#\r\n # 添加上l2正则化参数\r\n #------------------------------#\r\n loss_value = tf.reduce_sum(net.losses) + loss_value\r\n grads = tape.gradient(loss_value, net.trainable_variables)\r\n optimizer.apply_gradients(zip(grads, net.trainable_variables))\r\n return loss_value\r\n\r\n if strategy == None:\r\n return train_step\r\n else:\r\n #----------------------#\r\n # 多gpu训练\r\n #----------------------#\r\n @tf.function\r\n def distributed_train_step(imgs, targets, net, yolo_loss, optimizer):\r\n per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,))\r\n return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,\r\n axis=None)\r\n return distributed_train_step\r\n\r\n#----------------------#\r\n# 防止bug\r\n#----------------------#\r\ndef get_val_step_fn(strategy):\r\n @tf.function\r\n def val_step(imgs, targets, net, yolo_loss, optimizer):\r\n #------------------------------#\r\n # 计算loss\r\n #------------------------------#\r\n P5_output, P4_output, P3_output = net(imgs, training=False)\r\n args = [P5_output, P4_output, P3_output] + [targets]\r\n loss_value = yolo_loss(args)\r\n #------------------------------#\r\n # 添加上l2正则化参数\r\n #------------------------------#\r\n loss_value = tf.reduce_sum(net.losses) + loss_value\r\n return loss_value\r\n if strategy == None:\r\n return val_step\r\n else:\r\n #----------------------#\r\n # 多gpu验证\r\n #----------------------#\r\n @tf.function\r\n def distributed_val_step(imgs, targets, net, yolo_loss, optimizer):\r\n per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,))\r\n return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,\r\n axis=None)\r\n return distributed_val_step\r\n \r\ndef fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, \r\n input_shape, num_classes, save_period, save_dir, strategy):\r\n train_step = get_train_step_fn(strategy)\r\n val_step = get_val_step_fn(strategy)\r\n \r\n loss = 0\r\n val_loss = 0\r\n print('Start Train')\r\n with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(gen):\r\n if iteration >= epoch_step:\r\n break\r\n images, targets = batch[0], batch[1]\r\n loss_value = train_step(images, targets, net, yolo_loss, optimizer)\r\n loss = loss + loss_value\r\n\r\n pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1), \r\n 'lr' : optimizer.lr.numpy()})\r\n pbar.update(1)\r\n print('Finish Train')\r\n \r\n print('Start Validation')\r\n with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:\r\n for iteration, batch in enumerate(gen_val):\r\n if iteration >= epoch_step_val:\r\n break\r\n images, targets = batch[0], batch[1]\r\n loss_value = val_step(images, targets, net, yolo_loss, 
optimizer)\r\n val_loss = val_loss + loss_value\r\n\r\n pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)})\r\n pbar.update(1)\r\n print('Finish Validation')\r\n\r\n logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val}\r\n loss_history.on_epoch_end([], logs)\r\n eval_callback.on_epoch_end(epoch, logs)\r\n print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))\r\n print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))\r\n \r\n #-----------------------------------------------#\r\n # 保存权值\r\n #-----------------------------------------------#\r\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\r\n net.save_weights(os.path.join(save_dir, \"ep%03d-loss%.3f-val_loss%.3f.h5\" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val)))\r\n \r\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\r\n print('Save best model to best_epoch_weights.pth')\r\n net.save_weights(os.path.join(save_dir, \"best_epoch_weights.h5\"))\r\n \r\n net.save_weights(os.path.join(save_dir, \"last_epoch_weights.h5\"))" ]
[ [ "tensorflow.reduce_sum", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] } ]
aaronspring/doppyo
[ "e29e21fbb997f024f39d2e5e67decfc235b0dcca" ]
[ "doppyo/sugar.py" ]
[ "\"\"\"\n Collection of old doppyo functions and useful tidbits for internal dcfp use\n Authors: Dougie Squire and Thomas Moore\n Date created: 01/10/2018\n Python Version: 3.6\n\"\"\"\n\n# ===================================================================================================\n# Packages\n# ===================================================================================================\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nimport cartopy\nfrom collections import Sequence\nfrom itertools import chain, count\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nfrom cartopy.util import add_cyclic_point\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\n# Load doppyo packages -----\nfrom doppyo import utils\n\n# ===================================================================================================\ndef rank_gufunc(x):\n ''' Returns ranked data along specified dimension '''\n \n import bottleneck\n ranks = bottleneck.nanrankdata(x,axis=-1)\n ranks = ranks[...,0]\n \n return ranks\n\n\ndef compute_rank(da_1, da_2, over_dim): \n ''' Feeds forecast and observation data to ufunc that ranks data along specified dimension'''\n \n # Add 'ensemble' coord to obs if one does not exist -----\n if over_dim not in da_2.coords:\n da_2_pass = da_2.copy()\n da_2_pass.coords[over_dim] = -1\n da_2_pass = da_2_pass.expand_dims(over_dim)\n else:\n da_2_pass = da_2.copy()\n\n # Only keep and combine instances that appear in both dataarrays (excluding the ensemble dim) -----\n aligned = xr.align(da_2_pass, da_1, join='inner', exclude=over_dim)\n combined = xr.concat(aligned, dim=over_dim)\n \n return xr.apply_ufunc(rank_gufunc, combined,\n input_core_dims=[[over_dim]],\n dask='allowed',\n output_dtypes=[int]).rename('rank')\n\n\n# ===================================================================================================\ndef categorize(da, bin_edges):\n \"\"\" \n Returns the indices of the bins to which each value in input array belongs \n Output indices are such that bin_edges[i-1] <= x < bin_edges[i]\n \"\"\"\n\n return xr.apply_ufunc(np.digitize, da, bin_edges,\n input_core_dims=[[],[]],\n dask='allowed',\n output_dtypes=[int]).rename('categorized')\n\n\n# ===================================================================================================\ndef unstack_and_count(da, dims):\n \"\"\" Unstacks provided xarray object and returns the total number of elements along dims \"\"\"\n \n try:\n unstacked = da.unstack(da.dims[0])\n except ValueError:\n unstacked = da\n\n if dims is None:\n return ((0 * unstacked) + 1)\n else:\n return ((0 * unstacked) + 1).sum(dim=dims, skipna=True)\n\n\ndef compute_histogram(da, bin_edges, over_dims):\n \"\"\" Returns the histogram of data over the specified dimensions \"\"\"\n\n # To use groupby_bins, da must have a name -----\n da = da.rename('data') \n \n hist = da.groupby_bins(da, bins=bin_edges, squeeze=False) \\\n .apply(unstack_and_count, dims=over_dims) \\\n .fillna(0) \\\n .rename({'data_bins' : 'bins'})\n hist['bins'] = (bin_edges[0:-1]+bin_edges[1:])/2\n \n # Add nans where data did not fall in any bin -----\n return hist.astype(int).where(hist.sum('bins') != 0)\n\n\n# ===================================================================================================\ndef calc_gradient(da, dim, x=None):\n \"\"\"\n Returns the gradient computed using second order accurate central differences in the \n interior points and either first 
order accurate one-sided (forward or backwards) \n differences at the boundaries\n\n See https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.gradient.html\n \"\"\" \n \n # Replace dimension values if specified -----\n da_n = da.copy()\n if x is None:\n x = da_n[dim]\n \n centre_chunk = range(len(x[dim])-2)\n \n f_hd = da_n.shift(**{dim:-2})\n f = da_n.shift(**{dim:-1})\n f_hs = da_n\n hs = x.shift(**{dim:-1}) - x\n hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1})\n c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \\\n (hs * hd * (hd + hs)).isel(**{dim : centre_chunk})\n c[dim] = x[dim][1:-1]\n\n l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \\\n (x.shift(**{dim:-1}) - x).isel(**{dim : 0})\n\n r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \\\n (-x.shift(**{dim:1}) + x).isel(**{dim : -1})\n \n grad = xr.concat([l, c, r], dim=dim)\n grad[dim] = da[dim]\n \n return grad\n\n\n# ===================================================================================================\ndef bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date', \n lead_time_name='lead_time'):\n \"\"\"\n Adjusts, per month and lead time, the mean and standard deviation of da_biased to match that \n of da_target.\n Author: Dougie Squire\n Date: 01/09/2018\n \n Parameters\n ----------\n da_biased : xarray DataArray\n Array containing values to be corrected. The time information of this array is anticipated \n in a lead_time/inital_date format\n da_target : xarray DataArray\n Array containing values to use for the correction.\n da_target_clim : xarray DataArray, optional\n Array containing a climatology of da_target. If da_target_clim is provided, this function \n returns both the corrected full field and the anomalies. Otherwise, returns only the \n anomalies\n init_date_name : str, optional\n Name of initial date dimension\n lead_time_name : str, optional\n Name of lead time dimension\n \n Returns\n -------\n corrected : xarray DataArray\n Bias corrected array\n \n Examples\n --------\n >>> biased = xr.DataArray(np.random.normal(size=(48,6)), \n ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), \n ... ('lead_time', np.arange(6))])\n >>> biased['lead_time'].attrs['units'] = 'M'\n >>> target = xr.DataArray(np.random.normal(size=(48)), \n ... coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])\n >>> doppyo.utils.bias_correct_ms(biased, target)\n <xarray.DataArray (init_date: 48, lead_time: 6)>\n array([[ 9.336394e-02, 1.133997e-01, -5.851293e-01, -4.908594e-02,\n 7.952765e-01, 5.325052e-01],\n [-1.131123e+00, 1.603380e-01, -1.626906e+00, -1.811439e+00,\n -1.653359e-01, -1.871170e-01],\n [ 6.515435e-01, -1.064662e+00, 2.249610e+00, 6.881682e-01,\n -1.831233e-01, -1.159470e+00],\n ...,\n [-2.096226e+00, 3.143062e-04, 3.603787e-01, -1.515535e+00,\n 5.421578e-02, -6.446119e-01],\n [-8.186274e-01, -9.858171e-01, 1.933307e+00, 5.227265e-02,\n 5.443201e-01, -7.059492e-01],\n [ 2.253396e-02, 2.238470e+00, 1.138728e-01, -3.617103e-01,\n 1.678223e+00, -2.413158e+00]])\n Coordinates:\n * lead_time (lead_time) int64 0 1 2 3 4 5\n * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 
2021-12-31\n \n Notes\n -----------\n Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean and standard \n deviation to be computed reliably\n \"\"\"\n \n def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes mean \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)\n\n def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes standard deviation \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True)\n\n def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):\n \"\"\" Unstacks and adjusts input array by a constant shift as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us - shift.sel(month=the_month)\n\n def _unstack_and_scale_per_month(da, scale, init_date_name, lead_time_name):\n \"\"\" Unstacks and scales input array by a constant value as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us * scale.sel(month=the_month)\n\n def _scale_per_month(da, scale):\n \"\"\" Scales input array by a constant value as a function of month \"\"\"\n \n return da.groupby('time.month') * scale\n \n _anomalize = lambda data, clim: datetime_to_leadtime(\n anomalize(\n leadtime_to_datetime(data),clim))\n\n _rescale = lambda da, scale : datetime_to_leadtime(\n _scale_per_month(\n leadtime_to_datetime(da), scale))\n\n da_biased = da_biased.copy()\n da_target = da_target.copy()\n month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12\n month = month.where(month != 0, 12)\n\n # Correct the mean -----\n da_biased.coords['month'] = month\n try:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n \n if da_target_clim is not None:\n da_target_mean = da_target.groupby('time.month').mean('time')\n \n da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_meancorr[lead_time_name] = da_biased[lead_time_name]\n da_meancorr.coords['month'] = month\n\n # Compute the corrected anomalies -----\n da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)\n da_anom_meancorr.coords['month'] = month\n else:\n da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]\n da_anom_meancorr.coords['month'] = month\n \n # Correct the standard deviation -----\n try:\n 
da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n try:\n da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time')\n except:\n da_target_std = da_target.groupby('time.month').std('time')\n \n da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month, \n scale=(da_target_std / da_biased_std_tmp),\n init_date_name=init_date_name, \n lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name]\n da_anom_stdcorr_tmp.coords['month'] = month\n \n # This will \"squeeze\" each pdf at each lead time appropriately. However, the total variance across all leads for \n # a given month will now be incorrect. Thus, we now rescale as a function of month only\n try:\n da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time','ensemble'])\n except ValueError:\n da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time')\n da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std))\n \n if da_target_clim is not None:\n da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)\n return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month')\n else:\n return da_anom_stdcorr.drop('month')\n\n\n# ===================================================================================================\ndef bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date', \n lead_time_name='lead_time'):\n \"\"\"\n Adjusts, per month and lead time, the mean of da_biased to match that of da_target\n Author: Dougie Squire\n Date: 01/09/2018\n \n Parameters\n ----------\n da_biased : xarray DataArray\n Array containing values to be corrected. The time information of this array is anticipated \n in a lead_time/inital_date format\n da_target : xarray DataArray\n Array containing values to use for the correction.\n da_target_clim : xarray DataArray, optional\n Array containing a climatology of da_target. If da_target_clim is provided, this function \n returns both the corrected full field and the anomalies. Otherwise, returns only the \n anomalies\n init_date_name : str, optional\n Name of initial date dimension\n lead_time_name : str, optional\n Name of lead time dimension\n \n Returns\n -------\n corrected : xarray DataArray\n Bias corrected array\n \n Examples\n --------\n >>> biased = xr.DataArray(np.random.normal(size=(48,6)), \n ... coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')), \n ... ('lead_time', np.arange(6))])\n >>> biased['lead_time'].attrs['units'] = 'M'\n >>> target = xr.DataArray(np.random.normal(size=(48)), \n ... 
coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])\n >>> doppyo.utils.bias_correct_m(biased, target)\n <xarray.DataArray (init_date: 48, lead_time: 6)>\n array([[ 0.541226, 0.693622, -0.367322, 0.820282, 0.111487, 0.078355],\n [-0.299829, 0.164297, -0.976883, 0.463365, -0.26428 , -0.536119],\n [ 0.078832, -0.260615, -0.235059, -0.349185, 0.567183, -1.543395],\n ...,\n [ 0.335494, -1.121158, 1.313004, 0.604279, 0.135053, 0.031851],\n [ 0.33103 , 0.876521, -0.980873, 0.640328, 1.053691, 0.166768],\n [ 1.207329, 0.021916, 0.210883, -0.189922, 0.075786, 0.047616]])\n Coordinates:\n * init_date (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31\n * lead_time (lead_time) int64 0 1 2 3 4 5\n \n Notes\n -----------\n Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean to be \n computed reliably\n \"\"\"\n\n def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):\n \"\"\" Groups provided array by lead time and computes mean \"\"\"\n \n return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)\n \n def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):\n \"\"\" Unstacks and adjusts input array by a constant shift as a function of month \"\"\"\n \n da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)\n the_month = np.ndarray.flatten(da_us.month.values)\n the_month = int(np.unique(the_month[~np.isnan(the_month)]))\n \n return da_us - shift.sel(month=the_month)\n \n _anomalize = lambda data, clim: datetime_to_leadtime(\n anomalize(\n leadtime_to_datetime(data),clim))\n \n da_biased = da_biased.copy()\n da_target = da_target.copy()\n \n month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12\n month = month.where(month != 0, 12)\n\n # Correct the mean -----\n da_biased.coords['month'] = month\n try:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n except ValueError:\n da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,\n init_date_name=init_date_name, lead_time_name=lead_time_name)\n \n if da_target_clim is not None:\n da_target_mean = da_target.groupby('time.month').mean('time')\n \n da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_meancorr[lead_time_name] = da_biased[lead_time_name]\n da_meancorr.coords['month'] = month\n\n # Compute the corrected anomalies -----\n da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)\n da_anom_meancorr.coords['month'] = month\n else:\n da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),\n init_date_name=init_date_name, lead_time_name=lead_time_name) \\\n .mean('month', skipna=True)\n da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]\n da_anom_meancorr.coords['month'] = month\n \n if da_target_clim is not None:\n da_meancorrr = da_anom_meancorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)\n return da_meancorr.drop('month'), da_anom_meancorr.drop('month')\n else:\n return da_anom_meancorr.drop('month')\n\n \n# 
===================================================================================================\ndef conditional_bias_correct(da_cmp, da_ref, over_dims):\n \"\"\"\n Return conditional bias corrected data using the approach of Goddard et al. 2013\n \n \n \"\"\"\n\n cc = skill.compute_Pearson_corrcoef(da_cmp.mean('ensemble'), da_ref, over_dims=over_dims, subtract_local_mean=False)\n correct_cond_bias = (da_ref.std(over_dims) / da_cmp.mean('ensemble').std(over_dims)) * cc\n \n return da_cmp * correct_cond_bias\n\n\n# ===================================================================================================\ndef trunc_time(time, freq):\n \"\"\" \n Truncates values in provided time array to provided frequency. E.g. 2018-01-15T12:00 with \n freq = 'M' becomes 2018-01-01. \n \"\"\"\n \n return time.astype('<M8[' + freq + ']')\n\n\n# ===================================================================================================\ndef month_delta(date_in, delta, trunc_to_start=False):\n \"\"\" Increments provided datetime64 array by delta months \"\"\"\n \n date_mod = pd.Timestamp(date_in)\n \n m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12\n \n if not m: m = 12\n \n d = min(date_mod.day, [31,\n 29 if y % 4 == 0 and not y % 400 == 0 else 28,31,30,31,30,31,31,30,31,30,31][m - 1])\n \n if trunc_to_start:\n date_out = trunc_time(np.datetime64(date_mod.replace(day=d,month=m, year=y)),'M')\n else:\n date_out = np.datetime64(date_mod.replace(day=d,month=m, year=y))\n \n return np.datetime64(date_out,'ns')\n\n\n# ===================================================================================================\ndef year_delta(date_in, delta, trunc_to_start=False):\n \"\"\" Increments provided datetime64 array by delta years \"\"\"\n \n date_mod = month_delta(date_in, 12 * delta)\n \n if trunc_to_start:\n date_out = trunc_time(date_mod,'Y')\n else: date_out = date_mod\n \n return date_out\n\n\n# ===================================================================================================\ndef datetime_to_leadtime(data_in):\n \"\"\" Converts time information from single datetime dimension to init_date/lead_time dimension pair \"\"\"\n \n init_date = data_in.time.values[0]\n lead_times = range(len(data_in.time))\n\n try:\n freq = pd.infer_freq(data_in.time.values)\n \n # If pandas tries to assign start time to frequency (e.g. 
QS-OCT), remove this -----\n if '-' in freq:\n freq = freq[:freq.find('-')]\n \n # Split frequency into numbers and strings -----\n incr_string = ''.join([i for i in freq if i.isdigit()])\n freq_incr = [int(incr_string) if incr_string else 1][0]\n freq_type = ''.join([i for i in freq if not i.isdigit()])\n \n # Specify all lengths great than 1 month in months -----\n if 'QS' in freq_type:\n freq = str(3*freq_incr) + 'MS'\n elif 'Q' in freq_type:\n freq = str(3*freq_incr) + 'M'\n elif ('YS' in freq_type) | ('AS' in freq_type):\n freq = str(12*freq_incr) + 'MS'\n elif ('Y' in freq_type) | ('A' in freq_type):\n freq = str(12*freq_incr) + 'M'\n \n except ValueError:\n dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's')\n month = data_in.time.dt.month[0]\n if dt == 60*60*24:\n freq = 'D'\n elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) | \n (month == 12)) & (dt == 31*60*60*24):\n freq = 'MS'\n elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24):\n freq = 'MS'\n elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)): \n freq = 'MS'\n elif (dt == 365*60*60*24) | (dt == 366*60*60*24):\n freq = 'A'\n else:\n freq = 'NA'\n\n data_out = data_in.rename({'time' : 'lead_time'})\n data_out['lead_time'] = lead_times\n data_out['lead_time'].attrs['units'] = freq\n\n data_out.coords['init_date'] = init_date\n \n return data_out\n\n\n# ===================================================================================================\ndef leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'):\n \"\"\" Converts time information from lead time/initial date dimension pair to single datetime dimension \"\"\"\n \n try:\n init_date = data_in[init_date_name].values[0]\n except IndexError:\n init_date = data_in[init_date_name].values\n \n lead_times = list(map(int, data_in[lead_time_name].values))\n freq = data_in[lead_time_name].attrs['units']\n \n # # Split frequency into numbers and strings -----\n # incr_string = ''.join([i for i in freq if i.isdigit()])\n # freq_incr = [int(incr_string) if incr_string else 1][0]\n # freq_type = ''.join([i for i in freq if not i.isdigit()])\n\n # Deal with special cases of monthly and yearly frequencies -----\n # if 'M' in freq_type:\n # datetimes = np.array([month_delta(init_date, freq_incr * ix) for ix in lead_times])\n # elif ('A' in freq_type) | ('Y' in freq_type):\n # datetimes = np.array([year_delta(init_date, freq_incr * ix) for ix in lead_times])\n # else:\n # datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values \n datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values\n \n data_out = data_in.drop(init_date_name)\n data_out = data_out.rename({lead_time_name : 'time'})\n data_out['time'] = datetimes\n \n return prune(data_out)\n\n\n# ===================================================================================================\ndef get_nearest_point(da, lat, lon):\n \"\"\" Returns the nearest grid point to the specified lat/lon location \"\"\"\n\n return da.sel(lat=lat,lon=lon,method='nearest')\n\n\n# ===================================================================================================\n# visualization tools\n# ===================================================================================================\ndef plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1, \n squeeze_col=1, 
squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False):\n \"\"\" Plots tiles of figures \"\"\"\n \n def _depth(seq):\n for level in count():\n if not seq:\n return level\n seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))\n\n matplotlib.rc('font', family='sans-serif')\n matplotlib.rc('font', serif='Helvetica') \n matplotlib.rc('text', usetex='false') \n matplotlib.rcParams.update({'font.size': fontsize})\n\n nrow = int(np.ceil(len(data)/ncol));\n\n fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row))\n \n if (clims is not None) & (np.shape(vlims) != np.shape(clims)):\n raise ValueError('The input clims must be equal in size to vlims')\n \n # Check if vlims are given per figure or for all figures -----\n one_cbar = False\n if vlims is None:\n vlims = [[None, None]] * len(data)\n if _depth(vlims) == 1:\n one_cbar = True\n \n over_count = 1\n for idx,dat in enumerate(data):\n if one_cbar:\n vmin, vmax = vlims\n if clims is not None:\n cmin, cmax = clims\n else:\n vmin, vmax = vlims[idx]\n if clims is not None:\n cmin, cmax = clims[idx]\n \n if ('lat' in dat.dims) and ('lon' in dat.dims):\n trans = cartopy.crs.PlateCarree()\n ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180))\n extent = [dat.lon.min(), dat.lon.max(), \n dat.lat.min(), dat.lat.max()]\n\n if contour is True:\n if clims is not None:\n ax.coastlines(color='gray')\n im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans, \n vmin=vmin, vmax=vmax, cmap=cmap)\n ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,\n vmin=vmin, vmax=vmax, colors='w', linewidths=2)\n ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,\n vmin=vmin, vmax=vmax, colors='k', linewidths=1)\n else:\n ax.coastlines(color='black')\n im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, vmin=vmin, vmax=vmax, \n cmap=cmap)\n else:\n ax.coastlines(color='black')\n im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)\n\n gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True)\n gl.xlines = False\n gl.ylines = False\n gl.xlabels_top = False\n if over_count % ncol == 0:\n gl.ylabels_left = False\n elif (over_count+ncol-1) % ncol == 0: \n gl.ylabels_right = False\n else:\n gl.ylabels_left = False\n gl.ylabels_right = False\n gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180])\n gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90])\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n \n if not one_cbar:\n cbar = plt.colorbar(im, ax=ax, orientation=\"horizontal\", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)\n tick_locator = mticker.MaxNLocator(nbins=6)\n cbar.locator = tick_locator\n cbar.update_ticks()\n if headings is not None:\n cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);\n elif headings is not None:\n ax.set_title(headings[idx], fontsize=fontsize)\n else:\n ax = plt.subplot(nrow, ncol, over_count)\n if 'lat' in dat.dims:\n x_plt = dat['lat']\n y_plt = dat[utils.get_other_dims(dat,'lat')[0]]\n # if dat.get_axis_num('lat') > 0:\n # dat = dat.transpose()\n elif 'lon' in dat.dims:\n x_plt = dat['lon']\n y_plt = dat[utils.get_other_dims(dat,'lon')[0]]\n # if dat.get_axis_num('lon') > 0:\n # dat = dat.transpose()\n else: \n x_plt = dat[dat.dims[1]]\n y_plt = dat[dat.dims[0]]\n \n extent = [x_plt.min(), 
x_plt.max(), \n y_plt.min(), y_plt.max()]\n \n if contour is True:\n if clims is not None:\n im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax, \n cmap=cmap)\n ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2)\n ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1)\n else:\n im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap)\n else:\n im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)\n \n if over_count % ncol == 0:\n ax.yaxis.tick_right()\n elif (over_count+ncol-1) % ncol == 0: \n ax.set_ylabel(y_plt.dims[0], fontsize=fontsize)\n else:\n ax.set_yticks([])\n if idx / ncol >= nrow - 1:\n ax.set_xlabel(x_plt.dims[0], fontsize=fontsize)\n \n if not one_cbar:\n cbar = plt.colorbar(im, ax=ax, orientation=\"horizontal\", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)\n tick_locator = mticker.MaxNLocator(nbins=6)\n cbar.locator = tick_locator\n cbar.update_ticks()\n if headings is not None:\n cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);\n elif headings is not None:\n ax.set_title(headings[idx], fontsize=fontsize)\n \n if invert:\n ax.invert_yaxis()\n\n over_count += 1\n\n plt.tight_layout()\n \n if one_cbar:\n vmin, vmax = vlims\n fig.subplots_adjust(bottom=shift_cbar*0.16)\n cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020])\n cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal');\n cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize);\n cbar.set_ticks(np.linspace(vmin,vmax,5))\n elif title is not None:\n fig.suptitle(title, y=1)\n \n \n# ===================================================================================================\ndef size_GB(xr_object):\n \"\"\"\n How many GB (or GiB) is your xarray object?\n \n // Requires an xarray object\n \n // Returns:\n * equivalent GB (GBytes) - 10^9 conversion\n * equivalent GiB (GiBytes) - 2^ 30 conversion\n \n < Thomas Moore - [email protected] - 10102018 >\n \"\"\" \n bytes = xr_object.nbytes\n Ten2the9 = 10**9\n Two2the30 = 2**30\n GBytes = bytes / Ten2the9\n GiBytes = bytes / Two2the30\n \n #print out results\n print(xr_object.name, \"is\", GBytes, \"GB\", 'which is', GiBytes,\"GiB\")\n \n \n return GBytes,GiBytes\n\n\n# ===================================================================================================\ndef get_pres_name(da):\n \"\"\" \n Returns name of pressure dimension in input array\n Author: Dougie Squire\n Date: 03/03/2018\n \n Parameters\n ----------\n da : xarray DataArray\n Array with coordinate corresponding to pressure\n \n Returns\n -------\n name : str\n Name of dimension corresponding to pressure\n \n Examples\n --------\n >>> A = xr.DataArray(np.random.normal(size=(2,2,2,2,2)), \n ... coords=[('lat', np.arange(2)), ('lon', np.arange(2)), \n ... ('depth', np.arange(2)), ('level', np.arange(2)), \n ... 
('pfull', np.arange(2))])\n >>> doppyo.utils.get_pres_name(A)\n 'pfull'\n \"\"\"\n \n if 'pfull' in da.dims:\n return 'pfull'\n elif 'phalf' in da.dims:\n return 'phalf'\n else:\n raise KeyError('Unable to determine pressure dimension')\n pass\n \n \n# =================================================================================================== \ndef did_event(da, event):\n \"\"\" \n Returns array containing True/False where event occurs/does not occur \n \n Notes\n -----\n See http://www.cawcr.gov.au/projects/verification/\n \"\"\"\n \n eval_expr = event.replace(\">\", \"da >\").replace(\"<\", \"da <\").replace(\"==\", \"da ==\") \\\n .replace(\"=\", \"da ==\").replace('&&', '&').replace('||', '|') \\\n .replace(\"and\", \"&\").replace(\"or\", \"|\")\n eval_expr = '(' + eval_expr + ').rename(\"event_logical\")'\n \n return eval(eval_expr)\n\n\n# ===================================================================================================\ndef compute_likelihood(da_logical, dim='ensemble'):\n \"\"\" \n Returns array of likelihoods computed along dim from logical event data \n \n Notes\n -----\n See http://www.cawcr.gov.au/projects/verification/\n \"\"\"\n \n if dim == None:\n likelihood = da_logical\n else:\n likelihood = da_logical.mean(dim=dim).rename('likelihood')\n return likelihood\n\n\n# ===================================================================================================\ndef atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20,\n integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None, \n plevel_name=None):\n \"\"\"\n Returns all terms in the Lorenz energy cycle. Follows formulae and notation used in `Marques \n et al. 2011 Global diagnostic energetics of five state-of-the-art climate models. Climate \n Dynamics`. Note that this decomposition is in the space domain. A space-time decomposition \n can also be carried out (though not in Fourier space, but this is not implemented here (see \n `Oort. 1964 On Estimates of the atmospheric energy cycle. Monthly Weather Review`).\n\n Parameters\n ----------\n temp : xarray DataArray\n Array containing fields of temperature with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n u : xarray DataArray\n Array containing fields of zonal velocity with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n v : xarray DataArray\n Array containing fields of meridional velocity with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n omega : xarray DataArray\n Array containing fields of vertical velocity (pressure coordinates) with at least coordinates \n latitude, longitude and level (following standard naming - see Limitations)\n gh : xarray DataArray\n Array containing fields of geopotential height with at least coordinates latitude, longitude \n and level (following standard naming - see Limitations)\n terms : str or sequence of str\n List of terms to compute. If None, returns all terms. 
Available options are:\n Pz; total available potential energy in the zonally averaged temperature distribution\n Kz; total kinetic energy in zonally averaged motion\n Pe; total eddy available potential energy [= sum_n Pn (n > 0 only) for spectral=True] (Note that \n for spectral=True, an additional term, Sn, quantifying the rate of transfer of available potential \n energy to eddies of wavenumber n from eddies of all other wavenumbers is also returned)\n Ke; total eddy kinetic energy [= sum_n Kn (n > 0 only) for spectral=True] (Note that for \n spectral=True, an additional term, Ln, quantifying the rate of transfer of kinetic energy to eddies \n of wavenumber n from eddies of all other wavenumbers is also returned)\n Cz; rate of conversion of zonal available potential energy to zonal kinetic energy\n Ca; rate of transfer of total available potential energy in the zonally averaged temperature \n distribution (Pz) to total eddy available potential energy (Pe) [= sum_n Rn (n > 0 only) for \n spectral=True]\n Ce; rate of transfer of total eddy available potential energy (Pe) to total eddy kinetic energy \n (Ke) [= sum_n Cn (n > 0 only) for spectral=True]\n Ck; rate of transfer of total eddy kinetic energy (Ke) to total kinetic energy in zonally \n averaged motion (Kz) [= sum_n Mn (n > 0 only) for spectral=True]\n Gz; rate of generation of zonal available potential energy due to the zonally averaged heating (Pz).\n Note that this term is computed as a residual (Cz + Ca) and cannot be returned in spectral space. \n If Gz is requested with spectral=True, Gz is returned in real-space only\n Ge; rate of generation of eddy available potential energy (Pe). Note that this term is computed as \n a residual (Ce - Ca) and cannot be returned in spectral space. If Ge is requested with spectral=True, \n Ge is returned in real-space only\n Dz; rate of viscous dissipation of zonal kinetic energy (Kz). Note that this term is computed as a \n residual (Cz - Ck) and cannot be returned in spectral space. If Dz is requested with spectral=True, Dz \n is returned in real-space only\n De; rate of dissipation of eddy kinetic energy (Ke). Note that this term is computed as a residual \n (Ce - Ck) and cannot be returned in spectral space. If De is requested with spectral=True, De is \n returned in real-space only\n vgradz : bool, optional\n If True, uses `v-grad-z` approach for computing terms relating to conversion\n of potential energy to kinetic energy. Otherwise, defaults to using the \n `omaga-alpha` approach (see reference above for details)\n spectral : bool, optional\n If True, computes all terms as a function of wavenumber on longitudinal bands. To use this \n option, longitudes must be regularly spaced. Note that Ge and De are computed as residuals and\n cannot be computed in spectral space\n n_wavenumbers : int, optional\n Number of wavenumbers to retain either side of wavenumber=0. Obviously only does anything if \n spectral=True\n integrate : bool, optional\n If True, computes and returns the integral of each term over the mass of the \n atmosphere. Otherwise, only the integrands are returned.\n\n Returns\n -------\n atmos_energy_cycle : xarray Dataset\n \n \n Limitations\n -----------\n All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(), \n doppyo.utils.get_lon_name(), etc)\n Pressure levels must be provided in units of hPa\n \n Notes\n -----\n The following notation is used below (stackable, e.g. 
*_ZT indicates the time average of the zonal \n average):\n *_A -> area average over an isobaric surface\n *_a -> departure from area average\n *_Z -> zonal average\n *_z -> departure from zonal average\n *_T -> time average\n *_t -> departure from time average\n Additionally, capital variables indicate Fourier transforms:\n F(u) = U\n F(v) = V\n F(omega) = O\n F(gh) = A\n F(temp) = B\n \"\"\"\n \n def _flip_n(da):\n \"\"\" Flips data along wavenumber coordinate \"\"\"\n\n daf = da.copy()\n daf['n'] = -daf['n']\n\n return daf.sortby(daf['n'])\n\n\n def _truncate(F, n_truncate, dim):\n \"\"\" \n Converts spatial frequency dim to wavenumber, n, and truncates all wavenumbers greater than \n n_truncate \n \"\"\"\n F[dim] = 360 * F[dim]\n F = F.rename({dim : 'n'})\n F = F.where(abs(F.n) <= n_truncate, drop=True)\n return F, _flip_n(F)\n\n\n def _triple_terms(A, B, C):\n \"\"\" \n Calculate triple term summation of the form \\int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)\n \"\"\"\n\n # Use rolling operator to build shifted terms -----\n Am = A.rename({'n' : 'm'})\n Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0)\n Cnm['m'] = -C['n'].values\n\n # Drop m = 0 and n < 0 -----\n Am = Am.where(Am['m'] != 0, drop=True) \n Cnm = Cnm.where(Cnm['m'] != 0, drop=True)\n\n return (B * (Am * Cnm)).sum(dim='m', skipna=False)\n\n\n def _triple_terms_loop(A, B, C):\n \"\"\" \n Calculate triple term summation of the form \\int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)\n \"\"\"\n\n # Loop over all m's and perform rolling sum -----\n ms = A['n'].where(A['n'] != 0, drop=True).values\n ABC = A.copy() * 0\n for m in ms:\n Am = A.sel(n=m)\n Cnm = C.shift(n=int(m)).fillna(0)\n ABC = ABC + (Am * B * Cnm)\n\n return ABC\n \n if terms is None:\n terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 'Gz', 'Ge', 'Dz', 'De']\n if isinstance(terms, str):\n terms = [terms]\n \n # Initialize some things -----\n if lat_name is None:\n lat_name = utils.get_lat_name(temp)\n if lon_name is None:\n lon_name = utils.get_lon_name(temp)\n if plevel_name is None:\n plevel_name = utils.get_plevel_name(temp)\n \n degtorad = utils.constants().pi / 180\n tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad)\n cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad) \n \n # Determine the stability parameter using Saltzman's approach -----\n kappa = utils.constants().R_d / utils.constants().C_pd\n p_kap = (1000 / temp[plevel_name]) ** kappa\n theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat)\n dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100))\n gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp # [1/K]\n energies = gamma.rename('gamma').to_dataset()\n \n # Compute zonal terms\n # ========================\n \n if ('Pz' in terms):\n # Compute the total available potential energy in the zonally averaged temperature\n # distribution, Pz [also commonly called Az] -----\n temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)\n temp_Z = temp.mean(dim=lon_name)\n temp_Za = temp_Z - temp_A\n Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2 # [J/kg]\n energies['Pz_int'] = Pz_int\n if integrate:\n Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pz'] = Pz\n \n if ('Kz' in terms):\n # Compute the total kinetic energy in zonally averaged motion, Kz [also commonly \n # called Kz] -----\n u_Z = u.mean(dim=lon_name)\n v_Z = 
v.mean(dim=lon_name)\n Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2) # [J/kg]\n energies['Kz_int'] = Kz_int\n if integrate:\n Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Kz'] = Kz\n \n if ('Cz' in terms):\n # Compute the rate of conversion of zonal available potential energy (Pz) to zonal kinetic\n # energy (Kz), Cz [also commonly called Cz] -----\n if vgradz:\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n gh_Z = gh.mean(dim=lon_name)\n dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad))\n Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat # [W/kg]\n energies['Cz_int'] = Cz_int\n if integrate:\n Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name]) # [W/m^2]\n energies['Cz'] = Cz\n else:\n if 'temp_Za' not in locals():\n temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)\n temp_Z = temp.mean(dim=lon_name)\n temp_Za = temp_Z - temp_A\n omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat)\n omega_Z = omega.mean(dim=lon_name)\n omega_Za = omega_Z - omega_A\n Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za # [W/kg]\n energies['Cz_int'] = Cz_int\n if integrate:\n Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name]) # [W/m^2]\n energies['Cz'] = Cz\n \n # Compute eddy terms in Fourier space if spectral=True\n # ==========================================================\n if spectral:\n \n if ('Pe' in terms):\n # Compute the total available potential energy eddies of wavenumber n, Pn -----\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2)\n energies['Pn_int'] = Pn_int\n if integrate:\n Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pn'] = Pn\n\n # Compute the rate of transfer of available potential energy to eddies of \n # wavenumber n from eddies of all other wavenumbers, Sn -----\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /\n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /\n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /\n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n \n dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad))\n dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad))\n dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100))\n dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100))\n\n if loop_triple_terms:\n BpBnUp = _triple_terms_loop(Bp, Bn, Up)\n BpBpUn = _triple_terms_loop(Bp, Bp, Un)\n BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp)\n BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn)\n BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op)\n BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On)\n BpBnOp = _triple_terms_loop(Bp, Bn, Op)\n BpBpOn = _triple_terms_loop(Bp, Bp, On)\n else:\n BpBnUp = _triple_terms(Bp, Bn, Up)\n BpBpUn = _triple_terms(Bp, Bp, Un)\n BpglBnVp = _triple_terms(Bp, dBndlat, 
Vp)\n BpglBpVn = _triple_terms(Bp, dBpdlat, Vn)\n BpgpBnOp = _triple_terms(Bp, dBndp, Op)\n BpgpBpOn = _triple_terms(Bp, dBpdp, On)\n BpBnOp = _triple_terms(Bp, Bn, Op)\n BpBpOn = _triple_terms(Bp, Bp, On)\n\n Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \\\n (utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \\\n (BpBnUp + BpBpUn) + \\\n gamma * utils.constants().C_pd / utils.constants().R_earth * \\\n (BpglBnVp + BpglBpVn) + \\\n gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \\\n gamma * utils.constants().R_d / Bp[plevel_name] * \\\n (BpBnOp + BpBpOn)\n energies['Sn_int'] = Sn_int\n if integrate:\n Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Sn'] = Sn\n \n if ('Ke' in terms):\n # Compute the total kinetic energy in eddies of wavenumber n, Kn -----\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /\n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n Kn_int = abs(Up) ** 2 + abs(Vp) ** 2\n energies['Kn_int'] = Kn_int\n if integrate:\n Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Kn'] = Kn\n\n # Compute the rate of transfer of kinetic energy to eddies of wavenumber n from \n # eddies of all other wavenumbers, Ln -----\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n \n dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100))\n dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100))\n dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100))\n dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100))\n dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad))\n dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad))\n dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad))\n dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad))\n\n if loop_triple_terms:\n UpUnUp = _triple_terms_loop(Up, Un, Up)\n UpUpUn = _triple_terms_loop(Up, Up, Un)\n VpVnUp = _triple_terms_loop(Vp, Vn, Up)\n VpVpUn = _triple_terms_loop(Vp, Vp, Un)\n VpUnUp = _triple_terms_loop(Vp, Un, Up)\n VpUpUn = _triple_terms_loop(Vp, Up, Un)\n UpVnUp = _triple_terms_loop(Up, Vn, Up)\n UpVpUn = _triple_terms_loop(Up, Vp, Un)\n gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp)\n gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp)\n gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp)\n gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp)\n glUpUnglVpc = _triple_terms_loop(dUpdl, Un, dVpcdl)\n glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl)\n glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl)\n glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl)\n else:\n UpUnUp = _triple_terms(Up, Un, Up)\n UpUpUn = _triple_terms(Up, Up, Un)\n VpVnUp = _triple_terms(Vp, Vn, Up)\n VpVpUn = _triple_terms(Vp, Vp, Un)\n VpUnUp = _triple_terms(Vp, Un, Up)\n VpUpUn = _triple_terms(Vp, Up, Un)\n UpVnUp = _triple_terms(Up, Vn, Up)\n UpVpUn = 
_triple_terms(Up, Vp, Un)\n gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp)\n gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp)\n gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp)\n gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp)\n glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl)\n glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl)\n glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl)\n glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl)\n\n Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \\\n (UpUnUp - UpUpUn) + \\\n (1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \\\n (VpVnUp - VpVpUn) - \\\n tan_lat / utils.constants().R_earth * \\\n (VpUnUp + VpUpUn) + \\\n tan_lat / utils.constants().R_earth * \\\n (UpVnUp + UpVpUn) + \\\n (gpUpUngpOp + gpUpUpgpOn) + \\\n (gpVpVngpOp + gpVpVpgpOn) + \\\n 1 / (utils.constants().R_earth * cos_lat) * \\\n (glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc)\n energies['Ln_int'] = Ln_int\n if integrate:\n Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Ln'] = Ln\n \n if ('Ca' in terms):\n # Compute the rate of transfer of zonal available potential energy to eddy \n # available potential energy in wavenumber n, Rn -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'B' not in locals():\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))\n theta = temp * p_kap\n theta_Z = theta.mean(dim=lon_name)\n theta_Za = theta_Z - theta_A\n dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))\n Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) + \n (p_kap * dtheta_Zadp) * (Op * Bn + On * Bp)) # [W/kg]\n energies['Rn_int'] = Rn_int\n if integrate:\n Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Rn'] = Rn\n\n if ('Ce' in terms):\n # Compute the rate of conversion of available potential energy of wavenumber n \n # to eddy kinetic energy of wavenumber n, Cn -----\n if vgradz:\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / \n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) / \n len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n\n dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad))\n dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad))\n\n Cn_int = (((-1j * utils.constants().g * Up['n']) / \\\n (utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \\\n 
(Ap * Un - An * Up)) - \\\n ((utils.constants().g / utils.constants().R_earth) * \\\n (dApdlat * Vn + dAndlat * Vp)) # [W/kg]\n energies['Cn_int'] = Cn_int\n if integrate:\n Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Cn'] = Cn\n else:\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'B' not in locals():\n Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) / \n len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp) # [W/kg]\n energies['Cn_int'] = Cn_int\n if integrate:\n Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]\n energies['Cn'] = Cn\n \n if ('Ck' in terms):\n # Compute the rate of transfer of kinetic energy to the zonally averaged flow \n # from eddies of wavenumber n, Mn -----\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'U' not in locals():\n Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) / \n len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'V' not in locals():\n Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) / \n len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n if 'O' not in locals():\n Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) / \n len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)\n dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad))\n du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad), \n dim=lat_name, x=(u[lat_name] * degtorad))\n dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100))\n du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100))\n\n Mn_int = (-2 * Up * Un * v_Z * tan_lat / utils.constants().R_earth) + \\\n (2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \\\n ((Up * On + Un * Op) * du_Zdp) + \\\n ((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \\\n utils.constants().R_earth * du_Zndlat) # [W/kg]\n energies['Mn_int'] = Mn_int\n if integrate:\n Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]\n energies['Mn'] = Mn\n \n else:\n \n if ('Pe' in terms):\n # Compute the total eddy available potential energy, Pe [also commonly called \n # Ae] -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n temp_z = temp - temp_Z\n Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name) # [J/kg]\n energies['Pe_int'] = Pe_int\n if integrate:\n Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]\n energies['Pe'] = Pe\n \n if ('Ke' in terms):\n # Compute the total eddy kinetic energy, Ke -----\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n u_z = u - u_Z\n v_z = v - v_Z\n Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name) # [J/kg]\n energies['Ke_int'] = Ke_int\n if integrate:\n Ke = _int_over_atmos(Ke_int, lat_name, 
lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]\n energies['Ke'] = Ke\n \n if ('Ca' in terms):\n # Compute the rate of transfer of total available potential energy in the zonally \n # averaged temperature distribution (Pz) to total eddy available potential energy \n # (Pe), Ca -----\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n if 'theta_Z' not in locals():\n theta = temp * p_kap\n theta_Z = theta.mean(dim=lon_name)\n if 'dtemp_Zdlat' not in locals():\n dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))\n v_z = v - v_Z\n temp_z = temp - temp_Z\n omega_z = omega - omega_Z\n oT_Z = (omega_z * temp_z).mean(dim=lon_name)\n oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat)\n oT_Za = oT_Z - oT_A\n theta_Za = theta_Z - theta_A\n dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))\n Ca_int = - gamma * utils.constants().C_pd * \\\n (((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \\\n (p_kap * oT_Za * dtheta_Zadp)) # [W/kg]\n energies['Ca_int'] = Ca_int\n if integrate:\n Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name]) # [W/m^2]\n energies['Ca'] = Ca\n \n if ('Ce' in terms):\n # Compute the rate of transfer of total eddy available potential energy (Pe) to \n # total eddy kinetic energy (Ke), Ce -----\n if 'temp_Z' not in locals():\n temp_Z = temp.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n temp_z = temp - temp_Z\n omega_z = omega - omega_Z\n Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \\\n (omega_z * temp_z).mean(dim=lon_name) # [W/kg] \n energies['Ce_int'] = Ce_int\n if integrate:\n Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ce'] = Ce\n \n if ('Ck' in terms):\n # Compute the rate of transfer of total eddy kinetic energy (Ke) to total kinetic \n # energy in zonally averaged motion (Kz), Ck -----\n if 'u_Z' not in locals():\n u_Z = u.mean(dim=lon_name)\n if 'v_Z' not in locals():\n v_Z = v.mean(dim=lon_name)\n if 'omega_Z' not in locals():\n omega_Z = omega.mean(dim=lon_name)\n u_z = u - u_Z\n v_z = v - v_Z\n omega_z = omega - omega_Z\n du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad))\n dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad))\n du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100))\n dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100))\n Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \\\n (u_z * omega_z).mean(dim=lon_name) * du_Zdp + \\\n (v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \\\n (v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \\\n (u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth\n energies['Ck_int'] = Ck_int\n if integrate:\n Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ck'] = Ck\n \n if ('Gz' in terms):\n # Compute the rate of generation of zonal available potential energy due to the zonally\n # averaged heating, Gz -----\n if ('Cz' not in terms) | ('Ca' not in terms):\n raise ValueError('The rate of generation of zonal available 
potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.')\n Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca\n Gz_int = Cz_int + Ca_int\n energies['Gz_int'] = Gz_int\n if integrate:\n Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Gz'] = Gz\n\n if ('Ge' in terms):\n # Compute the rate of generation of eddy available potential energy (Ae), Ge -----\n if ('Ce' not in terms) | ('Ca' not in terms):\n raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. Returning Ge in real-space.')\n Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce\n if 'Ca_int' not in locals():\n Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca\n Ge_int = Ce_int - Ca_int\n energies['Ge_int'] = Ge_int\n if integrate:\n Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Ge'] = Ge\n \n if ('Dz' in terms):\n # Compute the rate of viscous dissipation of zonal kinetic energy, Dz -----\n if ('Cz' not in terms) | ('Ck' not in terms):\n raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].')\n if spectral: \n warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.')\n Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck\n Dz_int = Cz_int - Ck_int\n energies['Dz_int'] = Dz_int\n if integrate:\n Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['Dz'] = Dz\n\n if ('De' in terms):\n # Compute the rate of dissipation of eddy kinetic energy (Ke), De -----\n if ('Ce' not in terms) | ('Ck' not in terms):\n raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].')\n if spectral:\n warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. Returning De in real-space.')\n if 'Ce_int' not in locals():\n Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce\n if 'Ck_int' not in locals():\n Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck\n De_int = Ce_int - Ck_int\n energies['De_int'] = De_int\n if integrate:\n De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]\n energies['De'] = De\n \n return energies\n\n\n# ===================================================================================================\ndef auto_merge(paths, preprocess=None, parallel=True, **kwargs):\n \"\"\"\n Automatically merge a split xarray Dataset. 
This is designed to behave like\n `xarray.open_mfdataset`, except it supports concatenation along multiple\n dimensions.\n Parameters\n ----------\n datasets : str or list of str or list of xarray.Dataset\n Either a glob expression or list of paths as you would pass to\n xarray.open_mfdataset, or a list of xarray datasets. If a list of\n datasets is passed, you should make sure that they are represented\n as dask arrays to avoid reading the whole dataset into memory.\n Returns\n -------\n xarray.Dataset\n The merged dataset.\n \"\"\"\n \n if parallel:\n # wrap the open_dataset, getattr, and preprocess with delayed\n open_ = dask.delayed(xr.open_dataset)\n getattr_ = dask.delayed(getattr)\n if preprocess is not None:\n preprocess = dask.delayed(preprocess)\n else:\n open_ = open_dataset\n getattr_ = getattr\n\n datasets = [open_(p, **kwargs) for p in paths]\n file_objs = [getattr_(ds, '_file_obj') for ds in datasets]\n if preprocess is not None:\n datasets = [preprocess(ds) for ds in datasets]\n\n if parallel:\n # calling compute here will return the datasets/file_objs lists,\n # the underlying datasets will still be stored as dask arrays\n datasets, file_objs = dask.compute(datasets, file_objs)\n\n def _combine_along_last_dim(datasets):\n merged = []\n\n # Determine the dimension along which the dataset is split\n split_dims = [d for d in datasets[0].dims if\n len(np.unique([ds[d].values[0] for ds in datasets])) > 1]\n\n # Concatenate along one of the split dimensions\n concat_dim = split_dims[-1]\n\n # Group along the remaining dimensions and concatenate within each\n # group.\n sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0]\n for d in split_dims))\n for _, group in itertools.groupby(\n sorted_ds,\n key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1])\n ):\n merged.append(xr.auto_combine(group, concat_dim=concat_dim))\n\n return merged\n\n merged = datasets\n while len(merged) > 1:\n merged = _combine_along_last_dim(merged)\n\n return merged[0]" ]
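The auto_merge helper that closes the code above relies on xr.auto_combine, which newer xarray releases deprecate in favour of combine_by_coords / combine_nested. As a hedged usage sketch (not part of the recorded module, and with illustrative variable names), the same multi-dimensional merge of coordinate-tagged chunks can usually be written as:

import numpy as np
import xarray as xr

def make_chunk(t0, x0):
    # one tile of a dataset that has been split along both 'time' and 'x'
    return xr.Dataset(
        {"T": (("time", "x"), np.zeros((2, 2)))},
        coords={"time": [t0, t0 + 1], "x": [x0, x0 + 1]},
    )

chunks = [make_chunk(t, x) for t in (0, 2) for x in (0, 2)]  # 2 x 2 tiling
merged = xr.combine_by_coords(chunks)                        # infers both concat dimensions
print(dict(merged.sizes))                                    # -> time: 4, x: 4

combine_by_coords infers the concatenation order along every split dimension from the coordinate values themselves, which is essentially what the sorting-and-grouping loop in _combine_along_last_dim reimplements by hand.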
[ [ "pandas.infer_freq", "matplotlib.pyplot.tight_layout", "numpy.linspace", "numpy.unique", "numpy.isnan", "numpy.ndarray.flatten", "numpy.datetime64", "numpy.timedelta64", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.subplot", "matplotlib.rcParams.update", "numpy.shape", "matplotlib.ticker.MaxNLocator", "matplotlib.ticker.FixedLocator", "pandas.Timestamp", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gonzalo-munillag/Private_AI_OpenMined
[ "c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca", "c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca" ]
[ "Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py", "Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_i_renyi.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\" Demo for exponentiated Jensen-Tsallis kernel-1 estimators.\n\nAnalytical vs estimated value is illustrated for spherical normal random\nvariables.\n\n\"\"\"\n\nfrom numpy import eye\nfrom numpy.random import rand, multivariate_normal, randn\nfrom scipy import arange, zeros, ones\nimport matplotlib.pyplot as plt\n\nfrom ite.cost.x_factory import co_factory\nfrom ite.cost.x_analytical_values import analytical_value_k_ejt1\n\n\ndef main():\n # parameters:\n dim = 1 # dimension of the distribution\n num_of_samples_v = arange(1000, 50*1000+1, 2000)\n u = 0.8 # >0, parameter of the Jensen-Tsallis kernel\n cost_name = 'MKExpJT1_HT' # dim >= 1\n\n # initialization:\n alpha = 2\n # fixed; parameter of the Jensen-Tsallis kernel; for alpha = 2 we have\n # explicit formula for the Tsallis entropy, and hence for the\n # Jensen-Tsallis kernel(-1).\n\n distr = 'normal' # fixed\n num_of_samples_max = num_of_samples_v[-1]\n length = len(num_of_samples_v)\n co = co_factory(cost_name, mult=True, alpha=alpha, u=u) # cost object\n k_hat_v = zeros(length) # vector of estimated kernel values\n\n # distr, dim -> samples (y1,y2), distribution parameters (par1,par2), \n # analytical value (k):\n if distr == 'normal':\n # generate samples (y1,y2); y1~N(m1,s1^2xI), y2~N(m2,s2^2xI):\n m1, s1 = randn(dim), rand(1)\n m2, s2 = randn(dim), rand(1)\n y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max)\n y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max)\n \n par1 = {\"mean\": m1, \"std\": s1}\n par2 = {\"mean\": m2, \"std\": s2}\n else:\n raise Exception('Distribution=?') \n \n k = analytical_value_k_ejt1(distr, distr, u, par1, par2)\n \n # estimation:\n for (tk, num_of_samples) in enumerate(num_of_samples_v):\n k_hat_v[tk] = co.estimation(y1[0:num_of_samples],\n y2[0:num_of_samples]) # broadcast\n print(\"tk={0}/{1}\".format(tk+1, length))\n \n # plot: \n plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k)\n plt.xlabel('Number of samples')\n plt.ylabel('Exponentiated Jensen-Tsallis kernel-1')\n plt.legend(('estimation', 'analytical value'), loc='best')\n plt.title(\"Estimator: \" + cost_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python3\n\n\"\"\" Demo for Renyi mutual information estimators.\n\nAnalytical vs estimated value is illustrated for normal random variables.\n\n\"\"\"\n\nfrom numpy.random import rand, multivariate_normal\nfrom numpy import arange, zeros, dot, ones\nimport matplotlib.pyplot as plt\n\nfrom ite.cost.x_factory import co_factory\nfrom ite.cost.x_analytical_values import analytical_value_i_renyi\n\n\ndef main():\n # parameters:\n alpha = 0.7 # parameter of Renyi mutual information, \\ne 1\n dim = 2 # >=2; dimension of the distribution\n num_of_samples_v = arange(100, 10*1000+1, 500)\n\n cost_name = 'MIRenyi_DR'\n # cost_name = 'MIRenyi_HR'\n \n # initialization:\n distr = 'normal' # distribution; fixed \n ds = ones(dim, dtype='int') # dimensions of the 'subspaces'\n num_of_samples_max = num_of_samples_v[-1]\n length = len(num_of_samples_v)\n co = co_factory(cost_name, mult=True, alpha=alpha) # cost object\n # vector of estimated mutual information values:\n i_hat_v = zeros(length)\n\n # distr, dim -> samples (y), distribution parameters (par), analytical \n # value (i):\n if distr == 'normal':\n # mean (m), covariance matrix (c):\n m = rand(dim) \n l = rand(dim, dim)\n c = dot(l, l.T)\n \n # generate samples (y~N(m,c)): \n y = multivariate_normal(m, c, num_of_samples_max)\n \n par 
= {\"cov\": c} \n else:\n raise Exception('Distribution=?')\n \n i = analytical_value_i_renyi(distr, alpha, par)\n \n # estimation:\n for (tk, num_of_samples) in enumerate(num_of_samples_v):\n i_hat_v[tk] = co.estimation(y[0:num_of_samples], ds) # broadcast\n print(\"tk={0}/{1}\".format(tk+1, length))\n \n # plot: \n plt.plot(num_of_samples_v, i_hat_v, num_of_samples_v, ones(length)*i)\n plt.xlabel('Number of samples')\n plt.ylabel('Renyi mutual information')\n plt.legend(('estimation', 'analytical value'), loc='best')\n plt.title(\"Estimator: \" + cost_name)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "scipy.zeros", "numpy.eye", "numpy.random.randn", "numpy.random.rand", "scipy.arange", "scipy.ones", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.legend", "numpy.dot", "matplotlib.pyplot.title", "numpy.arange", "numpy.random.multivariate_normal", "numpy.ones", "numpy.random.rand", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jiangzoi/incubator-tvm
[ "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c", "144c6f45f7217b9df2f5605e06f0903e470ac11c" ]
[ "tests/python/contrib/test_gemm_acc32_vnni.py", "tests/python/relay/test_pass_auto_quantize.py", "topi/tests/python/test_topi_dense_tensorcore.py", "apps/extension/tests/test_ext.py", "tests/python/relay/test_adt.py", "tests/python/relay/dyn/test_dynamic_op_level3.py", "apps/ios_rpc/tests/ios_rpc_mobilenet.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition\n\nimport tvm\nfrom tvm import te\nimport numpy as np\nfrom topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake\nfrom topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32\nimport pytest\n\n\[email protected](\"skip because feature not enabled\")\ndef test_fc_int8_acc32():\n m = 1024\n n = 1024\n k = 1024\n\n X = te.placeholder((m, k), name='X', dtype=\"uint8\")\n W = te.placeholder((n, k), name='W', dtype=\"int8\")\n\n peak = 280\n print(\"Peak {} Gops/s\".format(peak))\n memory_ops = m * k + n * k + 2 * m * n\n gops_per_mm = 2 * m * n * k\n\n # For LLVM < 8.0, it shows \"'cascadelake' is not a recognized processor for this target\n # (ignoring processor)\" error with the following setting. After LLVM 8.0 is enabled in the\n # test, we should use cascadelake setting.\n def verify(target=\"llvm -mcpu=cascadelake\"):\n if not tvm.runtime.enabled(target):\n print(\"skip because %s is not enabled...\" % target)\n return\n\n ctx = tvm.context(target, 0)\n pc = dot_16x1x16_uint8_int8_int32_cascadelake()\n ak = te.reduce_axis((0, k), name='k')\n packedW = te.placeholder(\n (n // 16, 16 * (k // 4), 4), name='packedW', dtype=\"int8\")\n\n t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype(\n \"int32\") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype(\"int32\"), axis=ak), name=\"F\")\n t_sch = te.create_schedule(t_fc.op)\n a_x, a_y = t_fc.op.axis\n a_k, = t_fc.op.reduce_axis\n\n a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)\n a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)\n a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)\n a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)\n t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)\n\n t_sch[t_fc].unroll(a_koi)\n t_sch[t_fc].tensorize(a_yi, pc)\n\n t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name=\"intrinsic\")\n t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10)\n\n # generate the plain data\n a_ = np.random.uniform(1, 10, size=(m, k)).astype(\"uint8\")\n b_ = np.random.uniform(1, 10, size=(n, k)).astype(\"int8\")\n\n packW = np.random.uniform(1, 10, size=(\n n // 16, 16 * (k // 4), 4)).astype(\"int8\")\n # This occurs in pre_compute stage\n for r_idx in range(n // 16):\n for s_idx in range(16 * (k // 4)):\n for t_idx in range(4):\n packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx %\n 16][(s_idx // 16) * 4 + t_idx]\n\n x = tvm.nd.array(a_, ctx)\n w = tvm.nd.array(packW, ctx)\n y = tvm.nd.array(np.zeros((m, n), dtype=\"int32\"), ctx)\n result = t_evaluator(x, w, y)\n\n gops_per_sec = gops_per_mm / result.mean / 1e9\n # verify the correctness\n tvm.testing.assert_allclose(y.asnumpy(), 
np.dot(a_, b_.T), rtol=0)\n print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format(\n result.mean * 1000, gops_per_sec, gops_per_sec / peak))\n t_func.export_library(\"tensorize_acc32.o\")\n\n verify()\n\n\nif __name__ == \"__main__\":\n # The test requires Cascade Lake and newer Intel machines to generate the\n # correct AVX512 VNNI instruction. So, disabling the test.\n\n # test_fc_int8_acc32()\n pass\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\n\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay import testing\nfrom tvm.relay.expr import Call\n\n\ndef quantize_and_build(out):\n f = relay.Function(relay.analysis.free_vars(out), out)\n mod, params = testing.create_workload(f)\n\n with relay.quantize.qconfig(skip_conv_layers=[]):\n qmod = relay.quantize.quantize(mod, params)\n\n relay.build(qmod, \"llvm\", params=params)\n\n return qmod\n\ndef test_mul_rewrite():\n \"\"\"a test case where rhs of mul is not constant\"\"\"\n data = relay.var(\"data\", shape=(1, 16, 64, 64))\n multiplier = relay.sigmoid(relay.var(\"data\", shape=(1, 16, 1, 1)))\n conv = relay.nn.conv2d(data, relay.var(\"weight\"),\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16)\n act = relay.nn.relu(data=conv)\n\n quantize_and_build(act * multiplier)\n\n pool = relay.nn.global_avg_pool2d(data=act)\n\n quantize_and_build(act * pool)\n\ndef test_batch_flatten_rewrite():\n\n data = relay.var(\"data\", shape=(1, 16, 64, 64), dtype=\"float32\")\n\n out = relay.nn.conv2d(data, relay.var(\"weight\"),\n kernel_size=(3, 3),\n padding=(1, 1),\n channels=16)\n\n out = relay.nn.batch_flatten(out)\n\n qmod = quantize_and_build(out)\n\n def _check_batch_flatten(node):\n if isinstance(node, Call):\n if(node.op.name == \"nn.batch_flatten\"):\n assert node.checked_type.dtype == \"int8\"\n\n # check if batch_flatten is quantized\n relay.analysis.post_order_visit(qmod[\"main\"], _check_batch_flatten)\n\ndef get_calibration_dataset(input_name):\n dataset = []\n for i in range(5):\n data = np.random.uniform(size=(1, 3, 224, 224))\n dataset.append({input_name: data})\n return dataset\n\n\[email protected](\"create_target\", [True, False])\ndef test_calibrate_target(create_target):\n mod, params = testing.resnet.get_workload(num_layers=18)\n dataset = get_calibration_dataset(\"data\")\n with relay.quantize.qconfig(calibrate_mode=\"kl_divergence\"):\n if create_target:\n with tvm.target.create(\"llvm\"):\n relay.quantize.quantize(mod, params, dataset)\n else:\n # current_target = None\n relay.quantize.quantize(mod, params, dataset)\n\n\ndef test_calibrate_memory_bound():\n mod, params = testing.resnet.get_workload(num_layers=18)\n dataset = get_calibration_dataset(\"data\")\n import multiprocessing\n num_cpu = 
multiprocessing.cpu_count()\n with relay.quantize.qconfig(calibrate_mode=\"kl_divergence\",\n calibrate_chunk_by=num_cpu):\n relay.quantize.quantize(mod, params, dataset)\n\n\nif __name__ == \"__main__\":\n test_mul_rewrite()\n test_batch_flatten_rewrite()\n test_calibrate_target(False)\n test_calibrate_target(True)\n test_calibrate_memory_bound()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument\n\"\"\"Test code for dense tensorcore operator\"\"\"\nimport numpy as np\nimport tvm\nimport topi\nimport topi.testing\nfrom topi.util import get_const_tuple\nfrom tvm import te\nfrom tvm.contrib.pickle_memoize import memoize\nfrom tvm.contrib import nvcc\n\n\n_dense_implement = {\n \"gpu\": [(topi.cuda.dense_tensorcore, topi.cuda.schedule_dense_tensorcore)]\n}\n\ndef verify_dense(batch, in_dim, out_dim, use_bias=True):\n \"\"\"Dense tensorcore verify function\"\"\"\n A = te.placeholder((batch, in_dim), name='A')\n B = te.placeholder((out_dim, in_dim), name='B')\n C = te.placeholder((out_dim,), name='C')\n dtype = A.dtype\n\n # use memoize to pickle the test data for next time use\n @memoize(\"topi.tests.test_topi_dense_tensorcore\")\n def get_ref_data():\n a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)\n b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)\n c_np = np.random.uniform(size=(out_dim,)).astype(dtype)\n if use_bias:\n d_np = np.maximum(np.dot(a_np, b_np.T) + c_np, 0.0)\n else:\n d_np = np.maximum(np.dot(a_np, b_np.T), 0.0)\n return (a_np, b_np, c_np, d_np)\n # get the test data\n a_np, b_np, c_np, d_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n if not nvcc.have_tensorcore(ctx.compute_version):\n print(\"skip because gpu does not support Tensor Cores\")\n return\n print(\"Running on target: %s\" % device)\n for fcompute, fschedule in topi.testing.dispatch(device, _dense_implement):\n with tvm.target.create(device):\n D = fcompute(A, B, C if use_bias else None)\n D = topi.nn.relu(D)\n s = fschedule([D])\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(b_np, ctx)\n c = tvm.nd.array(c_np, ctx)\n d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), ctx)\n f = tvm.build(s, [A, B, C, D], device, name=\"dense\")\n f(a, b, c, d)\n tvm.testing.assert_allclose(d.asnumpy(), d_np, rtol=1e-3)\n\n\n for device in ['cuda']:\n check_device(device)\n\n\ndef test_dense_tensorcore():\n \"\"\"Test cases\"\"\"\n verify_dense(8, 16, 32, use_bias=True)\n verify_dense(16, 32, 16, use_bias=True)\n verify_dense(256, 1024, 1024, use_bias=True)\n verify_dense(1000, 1024, 1024, use_bias=False)\n verify_dense(256, 2048, 1000, 
use_bias=False)\n\n\nif __name__ == \"__main__\":\n test_dense_tensorcore()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm_ext\nimport tvm\nimport tvm._ffi.registry\nfrom tvm import te\nimport numpy as np\n\ndef test_bind_add():\n def add(a, b):\n return a + b\n f = tvm_ext.bind_add(add, 1)\n assert f(2) == 3\n\ndef test_ext_dev():\n n = 10\n A = te.placeholder((n,), name='A')\n B = te.compute((n,), lambda *i: A(*i) + 1.0, name='B')\n s = te.create_schedule(B.op)\n def check_llvm():\n if not tvm.runtime.enabled(\"llvm\"):\n return\n f = tvm.build(s, [A, B], \"ext_dev\", \"llvm\")\n ctx = tvm.ext_dev(0)\n # launch the kernel.\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)\n b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)\n f(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)\n check_llvm()\n\n\ndef test_sym_add():\n a = te.var('a')\n b = te.var('b')\n c = tvm_ext.sym_add(a, b)\n assert c.a == a and c.b == b\n\n\ndef test_ext_vec():\n ivec = tvm_ext.ivec_create(1, 2, 3)\n assert(isinstance(ivec, tvm_ext.IntVec))\n assert ivec[0] == 1\n assert ivec[1] == 2\n\n def ivec_cb(v2):\n assert(isinstance(v2, tvm_ext.IntVec))\n assert v2[2] == 3\n\n tvm.runtime.convert(ivec_cb)(ivec)\n\n\ndef test_extract_ext():\n fdict = tvm._ffi.registry.extract_ext_funcs(\n tvm_ext._LIB.TVMExtDeclare)\n assert fdict[\"mul\"](3, 4) == 12\n\n\ndef test_extern_call():\n n = 10\n A = te.placeholder((n,), name='A')\n B = te.compute((n,), lambda *i: tvm.tir.call_extern(\"float32\", \"TVMTestAddOne\", A(*i)), name='B')\n s = te.create_schedule(B.op)\n\n def check_llvm():\n if not tvm.runtime.enabled(\"llvm\"):\n return\n f = tvm.build(s, [A, B], \"llvm\")\n ctx = tvm.cpu(0)\n # launch the kernel.\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)\n b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)\n f(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)\n check_llvm()\n\n\ndef test_nd_subclass():\n a = tvm_ext.NDSubClass.create(additional_info=3)\n b = tvm_ext.NDSubClass.create(additional_info=5)\n assert isinstance(a, tvm_ext.NDSubClass)\n c = a + b\n d = a + a\n e = b + b\n assert(a.additional_info == 3)\n assert(b.additional_info == 5)\n assert(c.additional_info == 8)\n assert(d.additional_info == 6)\n assert(e.additional_info == 10)\n\n\nif __name__ == \"__main__\":\n test_nd_subclass()\n test_extern_call()\n test_ext_dev()\n test_ext_vec()\n test_bind_add()\n test_sym_add()\n test_extract_ext()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay.backend.interpreter import ConstructorValue\nfrom tvm.relay import create_executor\nfrom tvm.relay.prelude import Prelude, StaticTensorArrayOps\nfrom tvm.relay.testing import add_nat_definitions, count as count_, make_nat_value, make_nat_expr\n\nimport numpy as np\n\nmod = tvm.IRModule()\np = Prelude(mod)\nadd_nat_definitions(p)\n\ndef count(e):\n return count_(p, e)\n\nctx = tvm.context(\"llvm\", 0)\nintrp = create_executor(mod=mod, ctx=ctx, target=\"llvm\")\n\nz = p.z\ns = p.s\nnat = p.nat\ndouble = p.double\nadd = p.add\n\noptional = p.optional\nsome = p.some\nnone = p.none\n\nnil = p.nil\ncons = p.cons\nl = p.l\nhd = p.hd\ntl = p.tl\nnth = p.nth\nupdate = p.update\nlength = p.length\nmap = p.map\nfoldl = p.foldl\nfoldr = p.foldr\nfoldr1 = p.foldr1\nsum = p.sum\n\nconcat = p.concat\nfilter = p.filter\nzip = p.zip\nrev = p.rev\nunfoldl = p.unfoldl\nunfoldr = p.unfoldr\nmap_accumr = p.map_accumr\nmap_accuml = p.map_accuml\n\ntree = p.tree\nrose = p.rose\ntmap = p.tmap\nsize = p.size\n\ncompose = p.compose\niterate = p.iterate\n\n# this is an example of creating the adt value in python side\ndef make_nat(n):\n if n != 0:\n return ConstructorValue(s, [make_nat(n - 1)])\n else:\n return ConstructorValue(z, [])\n\ndef make_nat_expr(n):\n assert n >= 0\n ret = z()\n while n > 0:\n ret = s(ret)\n n = n - 1\n return ret\n\ndef to_list(l):\n assert isinstance(l, ConstructorValue)\n val = l\n ret = []\n while True:\n if val.tag == p.cons.tag:\n ret.append(val.fields[0])\n val = val.fields[1]\n else:\n assert val.tag == p.nil.tag\n break\n return ret\n\ndef tree_to_dict(t):\n assert isinstance(t, ConstructorValue)\n ret = {}\n assert t.tag == p.rose.tag\n ret['member'] = t.fields[0]\n ret['children'] = []\n for subtree in to_list(t.fields[1]):\n l = tree_to_dict(subtree)\n ret['children'].append(l)\n return ret\n\n\ndef vmobj_to_list(o, dtype=\"float32\"):\n if isinstance(o, tvm.nd.NDArray):\n return [o.asnumpy().tolist()]\n elif isinstance(o, tvm.runtime.container.ADT):\n if len(o) == 0:\n tensor_nil = p.get_var(\"tensor_nil\", dtype=dtype)\n if tensor_nil.tag == o.tag:\n return [0]\n return []\n\n result = []\n for f in o:\n result.extend(vmobj_to_list(f, dtype))\n return result\n elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):\n if o.constructor.name_hint == 'Cons':\n tl = vmobj_to_list(o.fields[1], dtype)\n hd = vmobj_to_list(o.fields[0], dtype)\n hd.extend(tl)\n return hd\n elif o.constructor.name_hint == 'Nil':\n return []\n elif 'tensor_nil' in o.constructor.name_hint:\n return [0]\n elif 'tensor' in o.constructor.name_hint:\n return [o.fields[0].asnumpy()]\n else:\n raise RuntimeError(\"Unknown object type: %s\" % o.constructor.name_hint)\n else:\n raise RuntimeError(\"Unknown object type: %s\" % type(o))\n\n\n# turns a scalar-valued relay tensor value into a python number\ndef get_scalar(tv):\n return 
tv.asnumpy().item()\n\n\ndef test_nat_value():\n assert count(make_nat_value(p, 10)) == 10\n assert count(intrp.evaluate(s(s(z())))) == 2\n\n\ndef test_nat_constructor():\n func = relay.Function([], z())\n test_z = relay.GlobalVar(\"test_z\")\n mod[test_z] = func\n assert mod[test_z].body.checked_type == nat()\n test_sz = relay.GlobalVar(\"test_sz\")\n func = relay.Function([], s(z()))\n mod[test_sz] = func\n assert mod[test_sz].body.checked_type == nat()\n\n\ndef test_double():\n assert mod[double].checked_type == relay.FuncType([nat()], nat())\n res = intrp.evaluate(double(s(z())))\n assert count(res) == 2\n\n\ndef test_add():\n assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())\n res = intrp.evaluate(add(s(z()), s(z())))\n assert count(res) == 2\n\n\ndef test_list_constructor():\n test_consz = relay.GlobalVar(\"test_consz\")\n func = relay.Function([], cons(z(), nil()))\n mod[test_consz] = func\n assert mod[test_consz].body.checked_type == l(nat())\n\ndef test_hd_tl():\n expected = list(range(10))\n l = nil()\n for i in reversed(expected):\n l = cons(make_nat_expr(i), l)\n\n got = []\n for i in range(len(expected)):\n got.append(count(intrp.evaluate(hd(l))))\n l = tl(l)\n\n assert got == expected\n\ndef test_nth():\n expected = list(range(10))\n l = nil()\n for i in reversed(expected):\n l = cons(relay.const(i), l)\n\n for i in range(len(expected)):\n item = intrp.evaluate(nth(l, relay.const(i)))\n assert get_scalar(item) == i\n\n\ndef test_update():\n expected = list(range(10))\n l = nil()\n # create zero initialized list\n for i in range(len(expected)):\n l = cons(make_nat_expr(0), l)\n\n # set value\n for i, v in enumerate(expected):\n l = update(l, relay.const(i), make_nat_expr(v))\n\n got = []\n for i in range(len(expected)):\n got.append(count(intrp.evaluate(nth(l, relay.const(i)))))\n\n assert got == expected\n\ndef test_length():\n a = relay.TypeVar(\"a\")\n assert mod[length].checked_type == relay.FuncType([l(a)], relay.scalar_type('int32'), [a])\n res = intrp.evaluate(length(cons(z(), cons(z(), cons(z(), nil())))))\n assert get_scalar(res) == 3\n\n\ndef test_map():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n lhs = mod[map].checked_type\n rhs = relay.FuncType([relay.FuncType([a], b), l(a)], l(b), [a, b])\n assert lhs == rhs\n\n x = relay.Var(\"x\")\n add_one = relay.Function([x], s(x))\n res = intrp.evaluate(map(add_one, cons(z(), cons(z(), nil()))))\n ones = to_list(res)\n assert len(ones) == 2\n assert count(ones[0]) == 1 and count(ones[1]) == 1\n\n\ndef test_foldl():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n lhs = mod[foldl].checked_type\n rhs = relay.FuncType([relay.FuncType([a, b], a), a, l(b)], a, [a, b])\n assert lhs == rhs\n\n x = relay.Var(\"x\")\n y = relay.Var(\"y\")\n rev_dup = relay.Function([y, x], cons(x, cons(x, y)))\n res = intrp.evaluate(foldl(rev_dup, nil(),\n cons(make_nat_expr(1),\n cons(make_nat_expr(2),\n cons(make_nat_expr(3), nil())))))\n reversed = to_list(res)\n assert len(reversed) == 6\n assert count(reversed[0]) == 3 and count(reversed[1]) == 3\n assert count(reversed[2]) == 2 and count(reversed[3]) == 2\n assert count(reversed[4]) == 1 and count(reversed[5]) == 1\n\n\ndef test_foldr():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n lhs = mod[foldr].checked_type\n rhs = relay.FuncType([relay.FuncType([a, b], b), b, l(a)], b, [a, b])\n assert lhs == rhs\n\n x = relay.Var(\"x\")\n y = relay.Var(\"y\")\n identity = relay.Function([x, y], cons(x, y))\n res = intrp.evaluate(foldr(identity, nil(),\n 
cons(make_nat_expr(1),\n cons(make_nat_expr(2),\n cons(make_nat_expr(3), nil())))))\n same = to_list(res)\n assert len(same) == 3\n assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3\n\n\ndef test_foldr1():\n a = relay.TypeVar(\"a\")\n lhs = mod[p.foldr1].checked_type\n rhs = relay.FuncType([relay.FuncType([a, a], a), l(a)], a, [a])\n assert lhs == rhs\n\n x = relay.Var(\"x\")\n y = relay.Var(\"y\")\n f = relay.Function([x, y], add(x, y))\n res = intrp.evaluate(foldr1(f,\n cons(make_nat_expr(1),\n cons(make_nat_expr(2),\n cons(make_nat_expr(3), nil())))))\n\n assert count(res) == 6\n\n\ndef test_sum():\n assert mod[sum].checked_type == relay.FuncType([l(relay.scalar_type('int32'))], relay.scalar_type('int32'))\n res = intrp.evaluate(sum(cons(relay.const(1), cons(relay.const(2), nil()))))\n assert get_scalar(res) == 3\n\n\ndef test_concat():\n a = relay.TypeVar(\"a\")\n assert mod[concat].checked_type == relay.FuncType([l(a), l(a)], l(a), [a])\n\n l1 = cons(make_nat_expr(1), cons(make_nat_expr(2), nil()))\n l2 = cons(make_nat_expr(3), cons(make_nat_expr(4), nil()))\n res = intrp.evaluate(concat(l1, l2))\n\n catted = to_list(res)\n assert len(catted) == 4\n assert count(catted[0]) == 1\n assert count(catted[1]) == 2\n assert count(catted[2]) == 3\n assert count(catted[3]) == 4\n\n\ndef test_filter():\n a = relay.TypeVar(\"a\")\n expected_type = relay.FuncType([\n relay.FuncType([a], relay.scalar_type(\"bool\")), l(a)\n ], l(a), [a])\n assert mod[filter].checked_type == expected_type\n\n x = relay.Var(\"x\", nat())\n greater_than_one = relay.Function(\n [x],\n relay.Match(x, [\n relay.Clause(\n relay.PatternConstructor(s, [\n relay.PatternConstructor(\n s, [relay.PatternWildcard()])\n ]),\n relay.const(True)),\n relay.Clause(relay.PatternWildcard(), relay.const(False))\n ]))\n res = intrp.evaluate(\n filter(greater_than_one,\n cons(make_nat_expr(1),\n cons(make_nat_expr(1),\n cons(make_nat_expr(3),\n cons(make_nat_expr(1),\n cons(make_nat_expr(5),\n cons(make_nat_expr(1),\n nil()))))))))\n filtered = to_list(res)\n assert len(filtered) == 2\n assert count(filtered[0]) == 3\n assert count(filtered[1]) == 5\n\n\ndef test_zip():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n expected_type = relay.FuncType([l(a), l(b)],\n l(relay.TupleType([a, b])), [a, b])\n assert mod[zip].checked_type == expected_type\n\n l1 = cons(make_nat_expr(1), cons(make_nat_expr(2), cons(make_nat_expr(3), nil())))\n l2 = cons(nil(),\n cons(cons(nil(), nil()),\n cons(cons(nil(), cons(nil(), nil())),\n nil())))\n\n res = intrp.evaluate(zip(l1, l2))\n zipped = to_list(res)\n assert len(zipped) == 3\n assert count(zipped[0][0]) == 1\n assert len(to_list(zipped[0][1])) == 0\n assert count(zipped[1][0]) == 2\n assert len(to_list(zipped[1][1])) == 1\n assert count(zipped[2][0]) == 3\n assert len(to_list(zipped[2][1])) == 2\n\n # test truncation\n l3 = cons(make_nat_expr(4), cons(make_nat_expr(5), nil()))\n shorter_res = intrp.evaluate(zip(l3, l2))\n truncated = to_list(shorter_res)\n assert len(truncated) == 2\n assert count(truncated[0][0]) == 4\n assert len(to_list(truncated[0][1])) == 0\n assert count(truncated[1][0]) == 5\n assert len(to_list(truncated[1][1])) == 1\n\n l4 = cons(nil(), nil())\n shortest_res = intrp.evaluate(zip(l3, l4))\n singleton = to_list(shortest_res)\n assert len(singleton) == 1\n assert count(singleton[0][0]) == 4\n assert len(to_list(singleton[0][1])) == 0\n\n\ndef test_rev():\n a = relay.TypeVar(\"a\")\n assert mod[rev].checked_type == relay.FuncType([l(a)], l(a), 
[a])\n\n res = intrp.evaluate(rev(cons(make_nat_expr(1),\n cons(make_nat_expr(2),\n cons(make_nat_expr(3), nil())))))\n reversed = to_list(res)\n\n assert len(reversed) == 3\n assert count(reversed[0]) == 3\n assert count(reversed[1]) == 2\n assert count(reversed[2]) == 1\n\n\ndef test_unfoldr():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n expected_type = relay.FuncType([\n relay.FuncType([a], optional(relay.TupleType([a, b]))), a],\n l(b), [a, b])\n\n x = relay.Var(\"x\", nat())\n n = relay.Var(\"n\", nat())\n count_down = relay.Function(\n [x],\n relay.Match(x, [\n relay.Clause(relay.PatternConstructor(\n s, [relay.PatternVar(n)]),\n some(relay.Tuple([n, x]))),\n relay.Clause(relay.PatternConstructor(z, []), none())\n ]))\n\n res = intrp.evaluate(unfoldr(count_down, make_nat_expr(3)))\n unfolded = to_list(res)\n\n assert len(unfolded) == 3\n assert count(unfolded[0]) == 3\n assert count(unfolded[1]) == 2\n assert count(unfolded[2]) == 1\n\n\ndef test_unfoldl():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n expected_type = relay.FuncType([\n relay.FuncType([a], optional(relay.TupleType([a, b]))), a],\n l(b), [a, b])\n\n x = relay.Var(\"x\", nat())\n n = relay.Var(\"n\", nat())\n count_down = relay.Function(\n [x],\n relay.Match(x, [\n relay.Clause(relay.PatternConstructor(\n s, [relay.PatternVar(n)]),\n some(relay.Tuple([n, x]))),\n relay.Clause(relay.PatternConstructor(z, []), none())\n ]))\n\n res = intrp.evaluate(unfoldl(count_down, make_nat_expr(3)))\n unfolded = to_list(res)\n\n assert len(unfolded) == 3\n assert count(unfolded[0]) == 1\n assert count(unfolded[1]) == 2\n assert count(unfolded[2]) == 3\n\n\ndef test_map_accumr():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n c = relay.TypeVar(\"c\")\n expected_type = relay.FuncType([\n relay.FuncType([a, b], relay.TupleType([a, c])),\n a, l(b)\n ], relay.TupleType([a, l(c)]), [a, b, c])\n assert mod[map_accumr].checked_type == expected_type\n\n acc = relay.Var(\"acc\", nat())\n x = relay.Var(\"x\", nat())\n add_acc_to_each = relay.Function([acc, x],\n relay.Tuple([add(x, acc),\n add(x, acc)]))\n\n vals = cons(make_nat_expr(1), cons(make_nat_expr(2), cons(make_nat_expr(3), nil())))\n res = intrp.evaluate(map_accumr(add_acc_to_each, z(), vals))\n\n sum = count(res[0])\n new_vals = to_list(res[1])\n\n assert sum == 6\n assert len(new_vals) == 3\n assert count(new_vals[0]) == 6\n assert count(new_vals[1]) == 5\n assert count(new_vals[2]) == 3\n\n\ndef test_map_accuml():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n c = relay.TypeVar(\"c\")\n expected_type = relay.FuncType([\n relay.FuncType([a, b], relay.TupleType([a, c])),\n a, l(b)\n ], relay.TupleType([a, l(c)]), [a, b, c])\n assert mod[map_accuml].checked_type == expected_type\n\n acc = relay.Var(\"acc\", nat())\n x = relay.Var(\"x\", nat())\n add_to_acc = relay.Function([acc, x],\n relay.Tuple([add(x, acc), x]))\n\n vals = cons(make_nat_expr(1), cons(make_nat_expr(2), cons(make_nat_expr(3), nil())))\n res = intrp.evaluate(map_accuml(add_to_acc, z(), vals))\n\n sum = count(res[0])\n new_vals = to_list(res[1])\n\n assert sum == 6\n assert len(new_vals) == 3\n assert count(new_vals[0]) == 3\n assert count(new_vals[1]) == 2\n assert count(new_vals[2]) == 1\n\n\ndef test_optional_matching():\n x = relay.Var('x')\n y = relay.Var('y')\n v = relay.Var('v')\n condense = relay.Function(\n [x, y],\n relay.Match(x, [\n relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]), cons(v, y)),\n relay.Clause(relay.PatternConstructor(none), y)\n 
]))\n\n res = intrp.evaluate(foldr(condense, nil(), cons(\n some(make_nat_expr(3)),\n cons(none(), cons(some(make_nat_expr(1)), nil())))))\n\n reduced = to_list(res)\n assert len(reduced) == 2\n assert count(reduced[0]) == 3\n assert count(reduced[1]) == 1\n\n\ndef test_tmap():\n a = relay.TypeVar(\"a\")\n b = relay.TypeVar(\"b\")\n lhs = mod[tmap].checked_type\n rhs = relay.FuncType([relay.FuncType([a], b), tree(a)], tree(b), [a, b])\n assert lhs == rhs\n\n x = relay.Var(\"x\")\n add_one = relay.Function([x], s(x))\n res = intrp.evaluate(tmap(add_one,\n rose(z(),\n cons(rose(z(), nil()),\n cons(rose(z(), nil()),\n nil())))))\n\n tree_dict = tree_to_dict(res)\n assert count(tree_dict['member']) == 1\n assert len(tree_dict['children']) == 2\n for subtree in tree_dict['children']:\n assert count(subtree['member']) == 1\n assert len(subtree['children']) == 0\n\n\ndef test_size():\n a = relay.TypeVar(\"a\")\n lhs = mod[size].checked_type\n rhs = relay.FuncType([tree(a)], relay.scalar_type('int32'), [a])\n assert lhs == rhs\n\n root = rose(z(), cons(rose(z(), nil()),\n cons(rose(z(), nil()),\n nil())))\n t = rose(z(), cons(root, cons(root, cons(root, nil()))))\n res = intrp.evaluate(size(t))\n assert get_scalar(res) == 10\n\n\ndef test_wildcard_match_solo():\n x = relay.Var('x', nat())\n copy = relay.Function([x],\n relay.Match(x, [relay.Clause(relay.PatternWildcard(), x)]),\n nat())\n\n res = intrp.evaluate(copy(s(s(s(z())))))\n assert count(res) == 3\n\n\ndef test_wildcard_match_order():\n x = relay.Var('x', l(nat()))\n y = relay.Var('y')\n a = relay.Var('a')\n return_zero = relay.Function(\n [x],\n relay.Match(x, [\n relay.Clause(relay.PatternWildcard(), z()),\n relay.Clause(\n relay.PatternConstructor(\n cons, [relay.PatternVar(y), relay.PatternVar(a)]),\n y),\n relay.Clause(relay.PatternConstructor(nil), s(z()))\n ]),\n nat())\n\n res = intrp.evaluate(return_zero(cons(s(z()), nil())))\n # wildcard pattern is evaluated first\n assert count(res) == 0\n\n\ndef test_nested_matches():\n a = relay.TypeVar('a')\n x = relay.Var('x')\n y = relay.Var('y')\n w = relay.Var('w')\n h = relay.Var('h')\n t = relay.Var('t')\n flatten = relay.GlobalVar('flatten')\n\n # flatten could be written using a fold, but this way has nested matches\n inner_match = relay.Match(\n y, [\n relay.Clause(relay.PatternConstructor(nil), flatten(w)),\n relay.Clause(relay.PatternConstructor(\n cons, [relay.PatternVar(h), relay.PatternVar(t)]),\n cons(h, flatten(cons(t, w))))\n ])\n\n mod[flatten] = relay.Function(\n [x],\n relay.Match(x, [\n relay.Clause(relay.PatternConstructor(nil), nil()),\n relay.Clause(relay.PatternConstructor(\n cons, [relay.PatternVar(y), relay.PatternVar(w)]),\n inner_match)\n ]), l(a), [a])\n\n first_list = cons(make_nat_expr(1), cons(make_nat_expr(2),\n cons(make_nat_expr(3), nil())))\n second_list = cons(make_nat_expr(4), cons(make_nat_expr(5),\n cons(make_nat_expr(6), nil())))\n final_list = cons(first_list, cons(second_list, nil()))\n\n res = intrp.evaluate(flatten(final_list))\n\n flat = to_list(res)\n assert len(flat) == 6\n for i in range(6):\n assert count(flat[i]) == i + 1\n\n\ndef test_match_full_var():\n x = relay.Var('x')\n v = relay.Var('v')\n id_func = relay.Function([x],\n relay.Match(x,\n [relay.Clause(relay.PatternVar(v),\n v)]))\n\n res1 = intrp.evaluate(id_func(nil()))\n res2 = intrp.evaluate(id_func(cons(z(), cons(z(), nil()))))\n\n empty = to_list(res1)\n assert len(empty) == 0\n\n zeroes = to_list(res2)\n assert len(zeroes) == 2\n assert count(zeroes[0]) == 0\n assert 
count(zeroes[1]) == 0\n\n\ndef test_nested_pattern_match():\n x = relay.Var('x', l(nat()))\n h1 = relay.Var('h1')\n h2 = relay.Var('h2')\n t = relay.Var('t')\n match = relay.Match(\n x,\n [relay.Clause(\n relay.PatternConstructor(\n cons,\n [relay.PatternVar(h1),\n relay.PatternConstructor(\n cons,\n [relay.PatternVar(h2), relay.PatternVar(t)])]),\n h2),\n relay.Clause(relay.PatternWildcard(), z())\n ])\n get_second = relay.Function([x], match)\n\n res = intrp.evaluate(get_second(cons(s(z()),\n cons(s(s(z())),\n nil()))))\n\n assert count(res) == 2\n\n\ndef test_compose():\n n = relay.Var('n')\n inc = relay.Function([n], s(n))\n x = relay.Var('x')\n res = intrp.evaluate(relay.Call(compose(inc, double), [s(s(z()))]))\n assert count(res) == 5\n\n\ndef test_iterate():\n expr = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(3)])\n res = intrp.evaluate(relay.Function([], expr)())\n assert count(res) == 12\n\n\ndef check_tensor_array(ta_mod, ref_res, *args, dtype=\"float32\",\n ta_ctx=tvm.cpu(), target=\"llvm\", rtol=1e-5):\n for kind in [\"debug\", \"vm\"]:\n ex = relay.create_executor(kind, mod=ta_mod, ctx=ta_ctx, target=target)\n result = ex.evaluate()(*args)\n got = vmobj_to_list(result, dtype)\n tvm.testing.assert_allclose(ref_res, got, rtol=rtol, atol=rtol)\n\n\ndef test_tensor_expand_dims():\n def run(dtype):\n x = relay.var('x')\n mod = tvm.IRModule()\n p = Prelude(mod)\n expand_dims_func = p.get_var('tensor_expand_dims', dtype)\n tensor1 = p.get_var('tensor1', dtype)\n mod[\"main\"] = relay.Function([x], expand_dims_func(tensor1(x)))\n x_np = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)\n expected = [np.expand_dims(x_np, axis=0)]\n check_tensor_array(mod, expected, x_np)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_constructor():\n def run(dtype):\n x = relay.var('x')\n mod = tvm.IRModule()\n p = Prelude(mod)\n tensor_array = p.get_var('tensor_array', dtype)\n mod[\"main\"] = relay.Function([x], tensor_array(x))\n expected = np.array([0, 0, 0, 0, 0])\n check_tensor_array(mod, expected, 5, dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_read():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n l = relay.var('l')\n i = relay.var('i')\n read_func = p.get_var('tensor_array_read', dtype)\n tensor_array = p.get_var('tensor_array', dtype)\n mod[\"main\"] = relay.Function([l, i], read_func(tensor_array(l), i))\n expected = [0]\n check_tensor_array(mod, expected, *(1, 0), dtype=dtype)\n check_tensor_array(mod, expected, *(5, 1), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_write():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n tensor_array = p.get_var('tensor_array', dtype)\n init_tensor_array = tensor_array(relay.const(2))\n write_func = p.get_var('tensor_array_write', dtype)\n tensor1 = p.get_var('tensor1', dtype)\n tensor_array1 = write_func(init_tensor_array, relay.const(0),\n tensor1(v1))\n tensor_array2 = write_func(tensor_array1, relay.const(1), tensor1(v2))\n mod[\"main\"] = relay.Function([v1, v2], tensor_array2)\n expected = [3, 7]\n check_tensor_array(mod, expected, *(3, 7), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_stack():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n tensor_array = p.get_var('tensor_array', dtype)\n tensor1 = p.get_var('tensor1', dtype)\n write = p.get_var('tensor_array_write', dtype)\n stack = p.get_var('tensor_array_stack', dtype)\n v = relay.var('v')\n 
init_tensor_array = tensor_array(relay.const(3))\n tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))\n tensor_array2 = write(tensor_array1, relay.const(1), tensor1(v))\n tensor_array3 = write(tensor_array2, relay.const(2), tensor1(v))\n tensor_array4 = stack(tensor_array3)\n mod[\"main\"] = relay.Function([v], tensor_array4)\n t = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)\n expected = [np.stack([t, t, t])]\n check_tensor_array(mod, expected, t, dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_unstack():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n unstack_tensor1 = p.get_var('tensor_array_unstack_tensor1', dtype)\n v = relay.var('v')\n mod[\"main\"] = relay.Function([v], unstack_tensor1(v))\n t = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)\n check_tensor_array(mod, t, t, dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_take():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n take = p.get_var('tensor_take', dtype)\n tensor2 = p.get_var('tensor2', dtype)\n v = relay.var('v')\n lower = relay.var('lower')\n upper = relay.var('upper')\n mod[\"main\"] = relay.Function([v, lower, upper], take(tensor2(v), lower, upper))\n v_data = np.random.uniform(low=0.0, high=8.0, size=(10, 10)).astype(dtype)\n expected = [np.take(v_data, range(2, 5), axis=0)]\n check_tensor_array(mod, expected, *(v_data, 2, 5), dtype=dtype)\n expected = [np.take(v_data, range(0, 9), axis=0)]\n check_tensor_array(mod, expected, *(v_data, 0, 9), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_concatenate():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n concat = p.get_var('tensor_concatenate', dtype)\n tensor1 = p.get_var('tensor1', dtype)\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n mod[\"main\"] = relay.Function([v1, v2], concat(tensor1(v1),\n tensor1(v2)))\n v1_data = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)\n expected = [np.concatenate((v1_data, v2_data))]\n check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_concat():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n tensor_array = p.get_var('tensor_array', dtype)\n tensor_array1 = tensor_array(relay.const(2))\n write_func = p.get_var('tensor_array_write', dtype)\n concat_func = p.get_var('tensor_array_concat', dtype)\n tensor1 = p.get_var('tensor2', dtype)\n tensor_array1 = write_func(tensor_array1, relay.const(0), tensor1(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor1(v2))\n tensor_array_concat = concat_func(tensor_array1)\n mod[\"main\"] = relay.Function([v1, v2], tensor_array_concat)\n v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)\n expected = [np.concatenate((v1_data, v2_data), axis=0)]\n check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_scatter():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n # tensor array\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n v3 = relay.var('v2')\n tensor_array = p.get_var('tensor_array', dtype)\n tensor_array1 = tensor_array(relay.const(3))\n write_func = p.get_var('tensor_array_write', dtype)\n scatter_func = 
p.get_var('tensor_array_scatter', dtype)\n tensor2 = p.get_var('tensor2', dtype)\n tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))\n tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))\n\n # indices array\n index = relay.var('index')\n\n # values array\n value_0 = relay.var('value_0')\n value_1 = relay.var('value_1')\n values_array = tensor_array(relay.const(2))\n values_array = write_func(values_array, relay.const(0),\n tensor2(value_0))\n values_array = write_func(values_array, relay.const(1),\n tensor2(value_1))\n\n # create the scatter function\n tensor_array_scatter = scatter_func(tensor_array1, index, values_array)\n mod[\"main\"] = relay.Function([v1, v2, v3, index, value_0, value_1],\n tensor_array_scatter)\n\n # initialize and check\n v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n index_data = np.array([0, 1], dtype=\"int32\")\n val1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n val2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n expected = [val1_data, val2_data, v3_data]\n check_tensor_array(mod, expected, *(v1_data, v2_data, v3_data,\n index_data, val1_data,\n val2_data), dtype=dtype)\n run('float32')\n run('int32')\n\n\ndef test_tensor_array_split():\n def run(dtype):\n mod = tvm.IRModule()\n p = Prelude(mod)\n\n # tensor array\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n v3 = relay.var('v2')\n tensor_array = p.get_var('tensor_array', dtype)\n tensor_array1 = tensor_array(relay.const(3))\n write_func = p.get_var('tensor_array_write', dtype)\n split_func = p.get_var('tensor_array_split', dtype)\n tensor2 = p.get_var('tensor2', dtype)\n tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))\n tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))\n\n # value tensor\n value = relay.var('value')\n\n # lengths tensor\n ta_len = relay.var('length')\n\n # create the scatter function\n tensor_array_split = split_func(tensor_array1, tensor2(value), ta_len)\n mod[\"main\"] = relay.Function([v1, v2, v3, value, ta_len],\n tensor_array_split)\n\n # initialize and check\n v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n value_data = np.random.uniform(low=0.0, high=8.0, size=(4, 3)).astype(dtype)\n length_data = np.array([2, 2], dtype=\"int32\")\n expected = np.concatenate([value_data, v3_data])\n expected = np.split(expected, indices_or_sections=[2, 4])\n check_tensor_array(mod, expected, *(v1_data, v2_data, v3_data,\n value_data, length_data),\n dtype=dtype)\n run('float32')\n run('int32')\n\ndef test_static_tensor_take():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n take = p.get_var_static('tensor_take', dtype, shape)\n tensor_constructor = p.get_var_static('tensor_constructor', dtype, shape)\n v = relay.var('v')\n lower = relay.var('lower')\n upper = relay.var('upper')\n mod[\"main\"] = relay.Function([v, lower, upper], 
take(tensor_constructor(v), lower, upper))\n v_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n expected = [np.take(v_data, range(2, 5), axis=0)]\n check_tensor_array(mod, expected, *(v_data, 2, 5), dtype=dtype)\n expected = [np.take(v_data, range(0, 9), axis=0)]\n check_tensor_array(mod, expected, *(v_data, 0, 9), dtype=dtype)\n run('float32', [10, 10])\n run('int32', [15, 11])\n\n\ndef test_static_tensor_concatenate():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n concat = p.get_var_static('tensor_concatenate', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n mod[\"main\"] = relay.Function([v1, v2], concat(tensor(v1),\n tensor(v2)))\n v1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n expected = [np.concatenate((v1_data, v2_data))]\n check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)\n run('float32', [5,])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_expand_dims():\n def run(dtype, shape):\n x = relay.var('x')\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n expand_dims_func = p.get_var_static('tensor_expand_dims', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n mod[\"main\"] = relay.Function([x], expand_dims_func(tensor(x)))\n x_np = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n expected = [np.expand_dims(x_np, axis=0)]\n check_tensor_array(mod, expected, x_np)\n run('float32', [])\n run('int32', [2,])\n\n\ndef test_static_tensor_array_constructor():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n tensor_constructor = p.get_name_static('tensor_constructor', dtype, shape)\n assert tensor_constructor != None\n run('float32', [1, 1])\n\n\ndef test_static_tensor_array_read():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n np_data_list = []\n ta_length = 3\n for _ in range(ta_length):\n np_data_list.append(np.random.uniform(0, 10, size=shape).astype(dtype))\n\n v0 = relay.var('v0')\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n n = relay.var('n')\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n init_tensor_array = tensor_array(relay.const(ta_length))\n read_func = p.get_var_static('tensor_array_read', dtype, shape)\n write_func = p.get_var_static('tensor_array_write', dtype, shape)\n tensor_array0 = write_func(init_tensor_array, relay.const(0),\n tensor(v0))\n tensor_array1 = write_func(tensor_array0, relay.const(1),\n tensor(v1))\n tensor_array2 = write_func(tensor_array1, relay.const(2),\n tensor(v2))\n\n mod[\"main\"] = relay.Function([v0, v1, v2, n], read_func(tensor_array2, n))\n expected = [np_data_list[0]]\n check_tensor_array(mod, expected, *list(np_data_list + [0]), dtype=dtype)\n expected = [np_data_list[1]]\n check_tensor_array(mod, expected, *list(np_data_list + [1]), dtype=dtype)\n expected = [np_data_list[2]]\n check_tensor_array(mod, expected, 
*list(np_data_list + [2]), dtype=dtype)\n run('float32', [])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_array_write():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n ta_length = 2\n np_data_list = [np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)]\n\n v0 = relay.var('v0')\n v1 = relay.var('v1')\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n init_tensor_array = tensor_array(relay.const(ta_length))\n write_func = p.get_var_static('tensor_array_write', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n tensor_array0 = write_func(init_tensor_array, relay.const(0),\n tensor(v0))\n tensor_array1 = write_func(tensor_array0, relay.const(1), tensor(v1))\n mod[\"main\"] = relay.Function([v0, v1], tensor_array1)\n expected = np_data_list\n check_tensor_array(mod, expected, *np_data_list, dtype=dtype)\n run('float32', [])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_array_unstack():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n unstack_tensor = p.get_var_static('tensor_array_unstack', dtype, shape)\n v = relay.var('v')\n mod[\"main\"] = relay.Function([v], unstack_tensor(v))\n t = np.random.uniform(low=0, high=10, size=shape).astype(dtype)\n *expected, = t\n check_tensor_array(mod, expected, t, dtype=dtype)\n run('float32', [4])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_array_scatter():\n def run(dtype, shape, indices_shape=None):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n if indices_shape is not None:\n static_tensor_array_ops.define_tensor_array_scatter(indices_shape, True)\n\n # tensor array\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n v3 = relay.var('v2')\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n tensor_array0 = tensor_array(relay.const(3))\n write_func = p.get_var_static('tensor_array_write', dtype, shape)\n scatter_func = p.get_var_static('tensor_array_scatter', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n tensor_array1 = write_func(tensor_array0, relay.const(0), tensor(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))\n tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))\n\n # indices array\n index = relay.var('index')\n\n # values array\n value_0 = relay.var('value_0')\n value_1 = relay.var('value_1')\n values_array = tensor_array(relay.const(2))\n values_array = write_func(values_array, relay.const(0),\n tensor(value_0))\n values_array = write_func(values_array, relay.const(1),\n tensor(value_1))\n\n # create the scatter function\n tensor_array_scatter = scatter_func(tensor_array1, index, values_array)\n mod[\"main\"] = relay.Function([v1, v2, v3, index, value_0, value_1],\n tensor_array_scatter)\n\n # initialize and check\n v1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n v3_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n index_data = np.array([0, 1], dtype=\"int32\")\n val1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n val2_data = np.random.uniform(low=0.0, high=8.0, 
size=shape).astype(dtype)\n expected = [val1_data, val2_data, v3_data]\n check_tensor_array(mod, expected, *(v1_data, v2_data, v3_data,\n index_data, val1_data,\n val2_data), dtype=dtype)\n run('float32', [2, 3])\n run('int32', [2, 3])\n run('float32', [2, 3], [2,])\n\n\ndef test_static_tensor_array_split():\n def run(dtype, shape, value_shape=None, lengths_shape=None):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n if value_shape is not None or lengths_shape is not None:\n static_tensor_array_ops.define_tensor_array_split(value_shape, lengths_shape, True)\n\n # tensor array\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n v3 = relay.var('v2')\n\n adt_shape = [relay.Any(),] + shape[1:]\n origin_shape = static_tensor_array_ops.shape\n static_tensor_array_ops.shape = adt_shape\n static_tensor_array_ops.define_tensor_array()\n tensor_array = p.get_var_static('tensor_array', dtype, adt_shape)\n static_tensor_array_ops.shape = origin_shape\n tensor_array1 = tensor_array(relay.const(3))\n write_func = p.get_var_static('tensor_array_write', dtype, adt_shape)\n split_func = p.get_var_static('tensor_array_split', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, adt_shape)\n tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))\n tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))\n\n # value tensor\n value = relay.var('value')\n\n # lengths tensor\n ta_len = relay.var('length')\n\n # create the split function\n if value_shape is None:\n tensor1 = p.get_var_static('tensor_constructor', dtype, shape)\n else:\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, value_shape)\n static_tensor_array_ops.register()\n tensor1 = p.get_var_static('tensor_constructor', dtype, value_shape)\n tensor_array_split = split_func(tensor_array1, tensor1(value), ta_len)\n mod[\"main\"] = relay.Function([v1, v2, v3, value, ta_len],\n tensor_array_split)\n\n # initialize and check\n v1_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)\n v3_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)\n value_data = np.random.uniform(low=0.0, high=8.0,\n size=value_shape or shape).astype(dtype)\n length_data = np.array([2, 2], dtype=\"int32\")\n expected = np.concatenate([value_data, v3_data])\n expected = np.split(expected, indices_or_sections=[2, 4])\n check_tensor_array(mod, expected, *(v1_data, v2_data, v3_data,\n value_data, length_data),\n dtype=dtype)\n\n run('float32', [4, 3])\n run('int32', [4, 3])\n run('int32', [relay.Any(), 3], [4, 3], [2,])\n\n\ndef test_static_tensor_array_concat():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n tensor_array1 = tensor_array(relay.const(2))\n write_func = p.get_var_static('tensor_array_write', dtype, shape)\n concat_func = p.get_var_static('tensor_array_concat', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))\n tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))\n tensor_array_concat = 
concat_func(tensor_array1)\n mod[\"main\"] = relay.Function([v1, v2], tensor_array_concat)\n v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)\n v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)\n expected = [np.concatenate((v1_data, v2_data), axis=0)]\n check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)\n run('float32', [relay.Any(), 3])\n run('int32', [relay.Any(), 3])\n\n\ndef test_static_tensor_array_gather():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n write = p.get_var_static('tensor_array_write', dtype, shape)\n gather = p.get_var_static('tensor_array_gather', dtype, shape)\n v = relay.var('v')\n indice = relay.var('indice')\n init_tensor_array = tensor_array(relay.const(3))\n tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))\n tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))\n tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))\n out = gather(tensor_array3, indice)\n mod[\"main\"] = relay.Function([v, indice], out)\n t = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n indice_data = np.array([0, 2], dtype=\"int32\")\n expected = [np.stack([t, t])]\n check_tensor_array(mod, expected, *(t, indice_data), dtype=dtype)\n run('float32', [])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_array_stack():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n write = p.get_var_static('tensor_array_write', dtype, shape)\n stack = p.get_var_static('tensor_array_stack', dtype, shape)\n v = relay.var('v')\n init_tensor_array = tensor_array(relay.const(3))\n tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))\n tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))\n tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))\n tensor_array4 = stack(tensor_array3)\n mod[\"main\"] = relay.Function([v], tensor_array4)\n t = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)\n expected = [np.stack([t, t, t])]\n check_tensor_array(mod, expected, t, dtype=dtype)\n run('float32', [])\n run('int32', [2, 3])\n\n\ndef test_static_tensor_get_data():\n def run(dtype, shape):\n mod = tvm.IRModule()\n p = Prelude(mod)\n static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)\n static_tensor_array_ops.register()\n\n np_data_list = []\n ta_length = 3\n for _ in range(ta_length):\n np_data_list.append(np.random.uniform(0, 10, size=shape).astype(dtype))\n\n v0 = relay.var('v0')\n v1 = relay.var('v1')\n v2 = relay.var('v2')\n n = relay.var('n')\n tensor = p.get_var_static('tensor_constructor', dtype, shape)\n tensor_array = p.get_var_static('tensor_array', dtype, shape)\n init_tensor_array = tensor_array(relay.const(ta_length))\n read_func = p.get_var_static('tensor_array_read', dtype, shape)\n write_func = p.get_var_static('tensor_array_write', dtype, shape)\n get_data_func = p.get_var_static('tensor_get_data', dtype, shape)\n tensor_array0 = write_func(init_tensor_array, relay.const(0),\n tensor(v0))\n tensor_array1 = 
write_func(tensor_array0, relay.const(1),\n tensor(v1))\n tensor_array2 = write_func(tensor_array1, relay.const(2),\n tensor(v2))\n\n mod[\"main\"] = relay.Function([v0, v1, v2, n], get_data_func(read_func(tensor_array2, n)))\n expected = [np_data_list[0]]\n check_tensor_array(mod, expected, *list(np_data_list + [0]), dtype=dtype)\n expected = [np_data_list[1]]\n check_tensor_array(mod, expected, *list(np_data_list + [1]), dtype=dtype)\n expected = [np_data_list[2]]\n check_tensor_array(mod, expected, *list(np_data_list + [2]), dtype=dtype)\n run('float32', [])\n run('int32', [2, 3])\n\nif __name__ == \"__main__\":\n test_nat_constructor()\n test_double()\n test_add()\n test_list_constructor()\n test_length()\n test_map()\n test_foldl()\n test_foldr()\n test_foldr1()\n test_concat()\n test_filter()\n test_zip()\n test_rev()\n test_unfoldl()\n test_unfoldr()\n test_map_accumr()\n test_map_accuml()\n test_sum()\n test_tmap()\n test_size()\n test_compose()\n test_iterate()\n\n test_tensor_expand_dims()\n test_tensor_array_constructor()\n test_tensor_array_read()\n test_tensor_array_write()\n test_tensor_array_stack()\n test_tensor_array_unstack()\n test_tensor_take()\n test_tensor_concatenate()\n test_tensor_array_concat()\n test_tensor_array_scatter()\n test_tensor_array_split()\n\n test_static_tensor_take()\n test_static_tensor_concatenate()\n test_static_tensor_expand_dims()\n test_static_tensor_array_constructor()\n test_static_tensor_array_read()\n test_static_tensor_array_write()\n test_static_tensor_array_unstack()\n test_static_tensor_array_scatter()\n test_static_tensor_array_split()\n test_static_tensor_array_concat()\n test_static_tensor_array_stack()\n test_static_tensor_array_gather()\n test_static_tensor_get_data()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Support level3 operator test cases.\n\"\"\"\nimport numpy as np\nimport pytest\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay import create_executor, transform\nfrom tvm.relay.testing import ctx_list, check_grad, run_infer_type\n\ndef verify_func(func, data, ref_res):\n assert isinstance(data, list)\n for target, ctx in ctx_list():\n #TODO(mbrookhart): enable Cuda tests onces the VM supports dynamic shapes\n if \"llvm\" not in target: continue\n for kind in [\"vm\", \"debug\"]:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)\n op_res = intrp.evaluate()(*data)\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n relay.backend.compile_engine.get().clear()\n\ndef test_dyn_reshape():\n def verify_reshape(shape, newshape, oshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = relay.var(\"y\", relay.TensorType((len(newshape), ), \"int64\"))\n z = relay.reshape(x, y)\n\n func = relay.Function([x, y], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n x_data = np.ones(shape).astype(\"float32\")\n ref_res = np.reshape(x_data, oshape)\n check_grad(run_infer_type(func),\n inputs=[x_data, np.array(newshape).astype(\"int64\")],\n test_inputs=[x_data], eps=1e-3)\n verify_func(func, [x_data, np.array(newshape).astype(\"int64\")], ref_res)\n verify_reshape((2, 3, 4), (8, 3), (8, 3))\n verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))\n verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))\n verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))\n verify_reshape((2, 3, 4), (0, -1), (2, 12))\n verify_reshape((2, 3, 4), (-1, 0), (8, 3))\n verify_reshape((2, 3, 4), (-3, 4), (6, 4))\n verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))\n verify_reshape((2, 3, 4), (0, -3), (2, 12))\n\ndef test_dyn_shape_reshape():\n def verify_reshape(shape, newshape, oshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n y = relay.var(\"y\", relay.TensorType(newshape, \"float32\"))\n z = relay.reshape(x, relay.shape_of(y))\n\n func = relay.Function([x, y], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n y_data = np.random.uniform(low=-1, high=1, size=newshape).astype(\"float32\")\n ref_res = np.reshape(x_data, oshape)\n check_grad(run_infer_type(func),\n inputs=[x_data, y_data], eps=1e-3)\n verify_func(func, [x_data, y_data], ref_res)\n verify_reshape((2, 3, 4), (8, 3), (8, 3))\n verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))\n\ndef test_dyn_tile():\n def verify_tile(dshape, reps):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n r = relay.var(\"reps\", relay.TensorType((len(reps), ), \"float32\"))\n z = relay.tile(x, r)\n\n func = relay.Function([x, r], z)\n x_data = np.random.uniform(low=-1, high=1, size=dshape).astype(\"float32\")\n ref_res = np.tile(x_data, reps=reps)\n reps_data = np.array(reps).astype(\"float32\")\n verify_func(func, [x_data, np.array(reps).astype(\"float32\")], ref_res)\n verify_tile((2, 3, 4), (3, 2, 1))\n verify_tile((2, 3, 4), (1, 2))\n verify_tile((2, 3), (3, 2, 1))\n\n\ndef test_dyn_zeros_ones():\n def verify_zeros_ones(shape, dtype):\n for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:\n rank = len(shape)\n dyn_shape = relay.Var(\"shape\", relay.ty.TensorType((rank,), 'int64'))\n y = op(dyn_shape, dtype)\n yy = run_infer_type(y)\n assert yy.checked_type == 
relay.ty.TensorType((relay.Any(),) * rank, dtype)\n\n func = relay.Function([dyn_shape], y)\n ref_res = ref(shape, dtype)\n for target, ctx in ctx_list():\n if (target != 'cuda'): #skip cuda because no dynamic support for GPU \n for kind in [\"vm\", \"debug\"]:\n mod = tvm.ir.IRModule.from_expr(func)\n intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(np.array(shape).astype('int64'))\n tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n\n verify_zeros_ones((124, 50), 'float64')\n\nif __name__ == \"__main__\":\n test_dyn_reshape()\n test_dyn_shape_reshape()\n test_dyn_tile()\n test_dyn_zeros_ones()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nfrom tvm import rpc, relay\nfrom tvm.contrib.download import download_testdata\nfrom tvm.relay.expr_functor import ExprMutator\nfrom tvm.relay import transform\nfrom tvm.relay.op.annotation import compiler_begin, compiler_end\nfrom tvm.relay.quantize.quantize import prerequisite_optimize\nfrom tvm.contrib import util, xcode, graph_runtime, coreml_runtime\nfrom tvm.contrib.target import coreml as _coreml\n\nimport os\nimport re\nimport sys\nimport numpy as np\nfrom mxnet import gluon\nfrom PIL import Image\nimport coremltools\n\n# Set to be address of tvm proxy.\nproxy_host = os.environ[\"TVM_IOS_RPC_PROXY_HOST\"]\n# Set your desination via env variable.\n# Should in format \"platform=iOS,id=<the test device uuid>\"\ndestination = os.environ[\"TVM_IOS_RPC_DESTINATION\"]\n\nif not re.match(r\"^platform=.*,id=.*$\", destination):\n print(\"Bad format: {}\".format(destination))\n print(\"Example of expected string: platform=iOS,id=1234567890abcabcabcabc1234567890abcabcab\")\n sys.exit(1)\n\nproxy_port = 9090\nkey = \"iphone\"\n\n# Change target configuration, this is setting for iphone6s\n#arch = \"x86_64\"\n#sdk = \"iphonesimulator\"\narch = \"arm64\"\nsdk = \"iphoneos\"\ntarget_host = \"llvm -mtriple=%s-apple-darwin\" % arch\n\n# override metal compiler to compile to iphone\[email protected]_func(\"tvm_callback_metal_compile\")\ndef compile_metal(src):\n return xcode.compile_metal(src, sdk=sdk)\n\ndef prepare_input():\n img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'\n img_name = 'cat.png'\n synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',\n '4d0b62f3d01426887599d4f7ede23ee5/raw/',\n '596b27d23537e5a1b5751d2b0481ef172f58b539/',\n 'imagenet1000_clsid_to_human.txt'])\n synset_name = 'imagenet1000_clsid_to_human.txt'\n img_path = download_testdata(img_url, 'cat.png', module='data')\n synset_path = download_testdata(synset_url, synset_name, module='data')\n with open(synset_path) as f:\n synset = eval(f.read())\n image = Image.open(img_path).resize((224, 224))\n\n image = 
np.array(image) - np.array([123., 117., 104.])\n image /= np.array([58.395, 57.12, 57.375])\n image = image.transpose((2, 0, 1))\n image = image[np.newaxis, :]\n return image.astype('float32'), synset\n\n\ndef get_model(model_name, data_shape):\n gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)\n mod, params = relay.frontend.from_mxnet(gluon_model, {\"data\": data_shape})\n # we want a probability so add a softmax operator\n func = mod[\"main\"]\n func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)\n\n return func, params\n\n\ndef test_mobilenet():\n temp = util.tempdir()\n image, synset = prepare_input()\n model, params = get_model('mobilenetv2_1.0', image.shape)\n\n def run(mod, target):\n with relay.build_config(opt_level=3):\n graph, lib, _params = relay.build(mod, target=target,\n target_host=target_host, params=params)\n path_dso = temp.relpath(\"deploy.dylib\")\n lib.export_library(path_dso, xcode.create_dylib, arch=arch, sdk=sdk)\n xcode.codesign(path_dso)\n\n # Start RPC test server that contains the compiled library.\n xcode.popen_test_rpc(proxy_host, proxy_port, key,\n destination=destination, libs=[path_dso])\n\n # connect to the proxy\n remote = rpc.connect(proxy_host, proxy_port, key=key)\n\n if target == \"metal\":\n ctx = remote.metal(0)\n else:\n ctx = remote.cpu(0)\n lib = remote.load_module(\"deploy.dylib\")\n m = graph_runtime.create(graph, lib, ctx)\n\n m.set_input('data', tvm.nd.array(image, ctx))\n m.set_input(**_params)\n m.run()\n tvm_output = m.get_output(0)\n top1 = np.argmax(tvm_output.asnumpy()[0])\n print('TVM prediction top-1:', top1, synset[top1])\n\n # evaluate\n ftimer = m.module.time_evaluator(\"run\", ctx, number=3, repeat=10)\n prof_res = np.array(ftimer().results) * 1000\n print(\"%-19s (%s)\" % (\"%.2f ms\" % np.mean(prof_res), \"%.2f ms\" % np.std(prof_res)))\n\n def annotate(func, compiler):\n \"\"\"\n An annotator for Core ML.\n \"\"\"\n # Bind free variables to the constant values.\n bind_dict = {}\n for arg in func.params:\n name = arg.name_hint\n if name in params:\n bind_dict[arg] = relay.const(params[name])\n\n func = relay.bind(func, bind_dict)\n\n # Annotate the entire graph for Core ML\n mod = tvm.IRModule()\n mod[\"main\"] = func\n\n seq = tvm.transform.Sequential([\n transform.SimplifyInference(),\n transform.FoldConstant(),\n transform.FoldScaleAxis(),\n transform.AnnotateTarget(compiler),\n transform.MergeCompilerRegions(),\n transform.PartitionGraph()\n ])\n\n with relay.build_config(opt_level=3):\n mod = seq(mod)\n\n return mod\n\n # CPU\n run(model, target_host)\n # Metal\n run(model, \"metal\")\n # CoreML\n run(annotate(model, \"coremlcompiler\"), target_host)\n\nif __name__ == \"__main__\":\n test_mobilenet()\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.random.uniform" ], [ "numpy.random.uniform" ], [ "numpy.random.uniform", "numpy.dot" ], [ "numpy.random.uniform", "numpy.zeros" ], [ "numpy.split", "numpy.expand_dims", "numpy.stack", "numpy.concatenate", "numpy.random.uniform", "numpy.array" ], [ "numpy.reshape", "numpy.tile", "numpy.ones", "numpy.random.uniform", "numpy.array" ], [ "numpy.std", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kifarid/ray
[ "43c97c2afb979987be82fa50048674e9b6776d5d", "bc08c6cdcc7ddf4da751ca2a972defd3db509061", "43c97c2afb979987be82fa50048674e9b6776d5d", "43c97c2afb979987be82fa50048674e9b6776d5d", "bc08c6cdcc7ddf4da751ca2a972defd3db509061" ]
[ "rllib/agents/marwil/tests/test_marwil.py", "rllib/env/wrappers/atari_wrappers.py", "rllib/policy/torch_policy.py", "python/ray/_private/parameter.py", "rllib/evaluation/episode.py" ]
[ "import numpy as np\nimport os\nfrom pathlib import Path\nimport unittest\n\nimport ray\nimport ray.rllib.agents.marwil as marwil\nfrom ray.rllib.evaluation.postprocessing import compute_advantages\nfrom ray.rllib.offline import JsonReader\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\nfrom ray.rllib.utils.test_utils import check, check_compute_single_action, \\\n framework_iterator\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\nclass TestMARWIL(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n ray.init(num_cpus=4)\n\n @classmethod\n def tearDownClass(cls):\n ray.shutdown()\n\n def test_marwil_compilation_and_learning_from_offline_file(self):\n \"\"\"Test whether a MARWILTrainer can be built with all frameworks.\n\n Learns from a historic-data file.\n To generate this data, first run:\n $ ./train.py --run=PPO --env=CartPole-v0 \\\n --stop='{\"timesteps_total\": 50000}' \\\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\n \"\"\"\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/cartpole/large.json\")\n print(\"data_file={} exists={}\".format(data_file,\n os.path.isfile(data_file)))\n\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n config[\"evaluation_num_workers\"] = 1\n config[\"evaluation_interval\"] = 2\n # Evaluate on actual environment.\n config[\"evaluation_config\"] = {\"input\": \"sampler\"}\n # Learn from offline data.\n config[\"input\"] = [data_file]\n num_iterations = 350\n min_reward = 70.0\n\n # Test for all frameworks.\n for _ in framework_iterator(config, frameworks=(\"tf\", \"torch\")):\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n learnt = False\n for i in range(num_iterations):\n eval_results = trainer.train().get(\"evaluation\")\n if eval_results:\n print(\"iter={} R={} \".format(\n i, eval_results[\"episode_reward_mean\"]))\n # Learn until some reward is reached on an actual live env.\n if eval_results[\"episode_reward_mean\"] > min_reward:\n print(\"learnt!\")\n learnt = True\n break\n\n if not learnt:\n raise ValueError(\n \"MARWILTrainer did not reach {} reward from expert \"\n \"offline data!\".format(min_reward))\n\n check_compute_single_action(\n trainer, include_prev_action_reward=True)\n\n trainer.stop()\n\n def test_marwil_loss_function(self):\n \"\"\"\n To generate the historic data used in this test case, first run:\n $ ./train.py --run=PPO --env=CartPole-v0 \\\n --stop='{\"timesteps_total\": 50000}' \\\n --config='{\"output\": \"/tmp/out\", \"batch_mode\": \"complete_episodes\"}'\n \"\"\"\n rllib_dir = Path(__file__).parent.parent.parent.parent\n print(\"rllib dir={}\".format(rllib_dir))\n data_file = os.path.join(rllib_dir, \"tests/data/cartpole/small.json\")\n print(\"data_file={} exists={}\".format(data_file,\n os.path.isfile(data_file)))\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n # Learn from offline data.\n config[\"input\"] = [data_file]\n\n for fw, sess in framework_iterator(config, session=True):\n reader = JsonReader(inputs=[data_file])\n batch = reader.next()\n\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n policy = trainer.get_policy()\n model = policy.model\n\n # Calculate our own expected values (to then compare against the\n # agent's loss output).\n cummulative_rewards = compute_advantages(\n batch, 0.0, config[\"gamma\"], 1.0, False, 
False)[\"advantages\"]\n if fw == \"torch\":\n cummulative_rewards = torch.tensor(cummulative_rewards)\n if fw != \"tf\":\n batch = policy._lazy_tensor_dict(batch)\n model_out, _ = model.from_batch(batch)\n vf_estimates = model.value_function()\n if fw == \"tf\":\n model_out, vf_estimates = \\\n policy.get_session().run([model_out, vf_estimates])\n adv = cummulative_rewards - vf_estimates\n if fw == \"torch\":\n adv = adv.detach().cpu().numpy()\n adv_squared = np.mean(np.square(adv))\n c_2 = 100.0 + 1e-8 * (adv_squared - 100.0)\n c = np.sqrt(c_2)\n exp_advs = np.exp(config[\"beta\"] * (adv / c))\n dist = policy.dist_class(model_out, model)\n logp = dist.logp(batch[\"actions\"])\n if fw == \"torch\":\n logp = logp.detach().cpu().numpy()\n elif fw == \"tf\":\n logp = sess.run(logp)\n # Calculate all expected loss components.\n expected_vf_loss = 0.5 * adv_squared\n expected_pol_loss = -1.0 * np.mean(exp_advs * logp)\n expected_loss = \\\n expected_pol_loss + config[\"vf_coeff\"] * expected_vf_loss\n\n # Calculate the algorithm's loss (to check against our own\n # calculation above).\n batch.set_get_interceptor(None)\n postprocessed_batch = policy.postprocess_trajectory(batch)\n loss_func = marwil.marwil_tf_policy.marwil_loss if fw != \"torch\" \\\n else marwil.marwil_torch_policy.marwil_loss\n if fw != \"tf\":\n policy._lazy_tensor_dict(postprocessed_batch)\n loss_out = loss_func(policy, model, policy.dist_class,\n postprocessed_batch)\n else:\n loss_out, v_loss, p_loss = policy.get_session().run(\n [policy._loss, policy.loss.v_loss, policy.loss.p_loss],\n feed_dict=policy._get_loss_inputs_dict(\n postprocessed_batch, shuffle=False))\n\n # Check all components.\n if fw == \"torch\":\n check(policy.v_loss, expected_vf_loss, decimals=4)\n check(policy.p_loss, expected_pol_loss, decimals=4)\n elif fw == \"tf\":\n check(v_loss, expected_vf_loss, decimals=4)\n check(p_loss, expected_pol_loss, decimals=4)\n else:\n check(policy.loss.v_loss, expected_vf_loss, decimals=4)\n check(policy.loss.p_loss, expected_pol_loss, decimals=4)\n check(loss_out, expected_loss, decimals=3)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n", "from collections import deque\nimport cv2\nimport gym\nfrom gym import spaces\nimport numpy as np\n\ncv2.ocl.setUseOpenCL(False)\n\n\ndef is_atari(env):\n if (hasattr(env.observation_space, \"shape\")\n and env.observation_space.shape is not None\n and len(env.observation_space.shape) <= 2):\n return False\n return hasattr(env, \"unwrapped\") and hasattr(env.unwrapped, \"ale\")\n\n\ndef get_wrapper_by_cls(env, cls):\n \"\"\"Returns the gym env wrapper of the given class, or None.\"\"\"\n currentenv = env\n while True:\n if isinstance(currentenv, cls):\n return currentenv\n elif isinstance(currentenv, gym.Wrapper):\n currentenv = currentenv.env\n else:\n return None\n\n\nclass MonitorEnv(gym.Wrapper):\n def __init__(self, env=None):\n \"\"\"Record episodes stats prior to EpisodicLifeEnv, etc.\"\"\"\n gym.Wrapper.__init__(self, env)\n self._current_reward = None\n self._num_steps = None\n self._total_steps = None\n self._episode_rewards = []\n self._episode_lengths = []\n self._num_episodes = 0\n self._num_returned = 0\n\n def reset(self, **kwargs):\n obs = self.env.reset(**kwargs)\n\n if self._total_steps is None:\n self._total_steps = sum(self._episode_lengths)\n\n if self._current_reward is not None:\n self._episode_rewards.append(self._current_reward)\n self._episode_lengths.append(self._num_steps)\n 
self._num_episodes += 1\n\n self._current_reward = 0\n self._num_steps = 0\n\n return obs\n\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n self._current_reward += rew\n self._num_steps += 1\n self._total_steps += 1\n return (obs, rew, done, info)\n\n def get_episode_rewards(self):\n return self._episode_rewards\n\n def get_episode_lengths(self):\n return self._episode_lengths\n\n def get_total_steps(self):\n return self._total_steps\n\n def next_episode_results(self):\n for i in range(self._num_returned, len(self._episode_rewards)):\n yield (self._episode_rewards[i], self._episode_lengths[i])\n self._num_returned = len(self._episode_rewards)\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == \"NOOP\"\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset.\n\n For environments that are fixed until firing.\"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == \"FIRE\"\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condtion for a few fr\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros(\n (2, ) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2:\n self._obs_buffer[0] = obs\n if i == self._skip - 1:\n self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env, dim):\n \"\"\"Warp frames to the specified size (dim x dim).\"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = dim\n self.height = dim\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.height, self.width, 1),\n dtype=np.uint8)\n\n def observation(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(\n frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame[:, :, None]\n\n\n# TODO: (sven) Deprecated class. Remove once traj. view is the norm.\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(shp[0], shp[1], shp[2] * k),\n dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return np.concatenate(self.frames, axis=2)\n\n\nclass FrameStackTrajectoryView(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"No stacking. 
Trajectory View API takes care of this.\"\"\"\n gym.Wrapper.__init__(self, env)\n shp = env.observation_space.shape\n assert shp[2] == 1\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(shp[0], shp[1]),\n dtype=env.observation_space.dtype)\n\n def observation(self, observation):\n return np.squeeze(observation, axis=-1)\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(\n low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)\n\n def observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\n\ndef wrap_deepmind(\n env,\n dim=84,\n # TODO: (sven) Remove once traj. view is norm.\n framestack=True,\n framestack_via_traj_view_api=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n\n Note that we assume reward clipping is done outside the wrapper.\n\n Args:\n dim (int): Dimension to resize observations to (dim x dim).\n framestack (bool): Whether to framestack observations.\n \"\"\"\n env = MonitorEnv(env)\n env = NoopResetEnv(env, noop_max=30)\n if env.spec is not None and \"NoFrameskip\" in env.spec.id:\n env = MaxAndSkipEnv(env, skip=4)\n env = EpisodicLifeEnv(env)\n if \"FIRE\" in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env, dim)\n # env = ScaledFloatFrame(env) # TODO: use for dqn?\n # env = ClipRewardEnv(env) # reward clipping is handled by policy eval\n # New way of frame stacking via the trajectory view API (model config key:\n # `num_framestacks=[int]`.\n if framestack_via_traj_view_api:\n env = FrameStackTrajectoryView(env)\n # Old way (w/o traj. view API) via model config key: `framestack=True`.\n # TODO: (sven) Remove once traj. 
view is norm.\n elif framestack is True:\n env = FrameStack(env, 4)\n return env\n", "import copy\nimport functools\nimport gym\nimport logging\nimport numpy as np\nimport os\nimport time\nimport threading\nimport tree # pip install dm_tree\nfrom typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union, \\\n TYPE_CHECKING\n\nimport ray\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper\nfrom ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size\nfrom ray.rllib.utils import force_list, NullContextManager\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.utils.schedules import PiecewiseSchedule\nfrom ray.rllib.utils.threading import with_lock\nfrom ray.rllib.utils.torch_ops import convert_to_non_torch_type, \\\n convert_to_torch_tensor\nfrom ray.rllib.utils.typing import ModelGradients, ModelWeights, TensorType, \\\n TrainerConfigDict\n\nif TYPE_CHECKING:\n from ray.rllib.evaluation import MultiAgentEpisode # noqa\n\ntorch, nn = try_import_torch()\n\nlogger = logging.getLogger(__name__)\n\n\n@DeveloperAPI\nclass TorchPolicy(Policy):\n \"\"\"Template for a PyTorch policy and loss to use with RLlib.\n\n Attributes:\n observation_space (gym.Space): observation space of the policy.\n action_space (gym.Space): action space of the policy.\n config (dict): config of the policy.\n model (TorchModel): Torch model instance.\n dist_class (type): Torch action distribution class.\n \"\"\"\n\n @DeveloperAPI\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n config: TrainerConfigDict,\n *,\n model: ModelV2,\n loss: Callable[[\n Policy, ModelV2, Type[TorchDistributionWrapper], SampleBatch\n ], Union[TensorType, List[TensorType]]],\n action_distribution_class: Type[TorchDistributionWrapper],\n action_sampler_fn: Optional[Callable[[\n TensorType, List[TensorType]\n ], Tuple[TensorType, TensorType]]] = None,\n action_distribution_fn: Optional[Callable[[\n Policy, ModelV2, TensorType, TensorType, TensorType\n ], Tuple[TensorType, Type[TorchDistributionWrapper], List[\n TensorType]]]] = None,\n max_seq_len: int = 20,\n get_batch_divisibility_req: Optional[Callable[[Policy],\n int]] = None,\n ):\n \"\"\"Build a policy from policy and loss torch modules.\n\n Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES\n is set. Only single GPU is supported for now.\n\n Args:\n observation_space (gym.spaces.Space): observation space of the\n policy.\n action_space (gym.spaces.Space): action space of the policy.\n config (TrainerConfigDict): The Policy config dict.\n model (ModelV2): PyTorch policy module. 
Given observations as\n input, this module must return a list of outputs where the\n first item is action logits, and the rest can be any value.\n loss (Callable[[Policy, ModelV2, Type[TorchDistributionWrapper],\n SampleBatch], Union[TensorType, List[TensorType]]]): Callable\n that returns a single scalar loss or a list of loss terms.\n action_distribution_class (Type[TorchDistributionWrapper]): Class\n for a torch action distribution.\n action_sampler_fn (Callable[[TensorType, List[TensorType]],\n Tuple[TensorType, TensorType]]): A callable returning a\n sampled action and its log-likelihood given Policy, ModelV2,\n input_dict, explore, timestep, and is_training.\n action_distribution_fn (Optional[Callable[[Policy, ModelV2,\n ModelInputDict, TensorType, TensorType],\n Tuple[TensorType, type, List[TensorType]]]]): A callable\n returning distribution inputs (parameters), a dist-class to\n generate an action distribution object from, and\n internal-state outputs (or an empty list if not applicable).\n Note: No Exploration hooks have to be called from within\n `action_distribution_fn`. It's should only perform a simple\n forward pass through some model.\n If None, pass inputs through `self.model()` to get distribution\n inputs.\n The callable takes as inputs: Policy, ModelV2, ModelInputDict,\n explore, timestep, is_training.\n max_seq_len (int): Max sequence length for LSTM training.\n get_batch_divisibility_req (Optional[Callable[[Policy], int]]]):\n Optional callable that returns the divisibility requirement\n for sample batches given the Policy.\n \"\"\"\n self.framework = \"torch\"\n super().__init__(observation_space, action_space, config)\n\n # Log device and worker index.\n from ray.rllib.evaluation.rollout_worker import get_global_worker\n worker = get_global_worker()\n worker_idx = worker.worker_index if worker else 0\n\n # Create multi-GPU model towers, if necessary.\n # - The central main model will be stored under self.model, residing on\n # self.device.\n # - Each GPU will have a copy of that model under\n # self.model_gpu_towers, matching the devices in self.devices.\n # - Parallelization is done by splitting the train batch and passing\n # it through the model copies in parallel, then averaging over the\n # resulting gradients, applying these averages on the main model and\n # updating all towers' weights from the main model.\n # - In case of just one device (1 (fake) GPU or 1 CPU), no\n # parallelization will be done.\n if config[\"_fake_gpus\"] or config[\"num_gpus\"] == 0 or \\\n not torch.cuda.is_available():\n logger.info(\"TorchPolicy (worker={}) running on {}.\".format(\n worker_idx if worker_idx > 0 else \"local\",\n \"{} fake-GPUs\".format(config[\"num_gpus\"])\n if config[\"_fake_gpus\"] else \"CPU\"))\n self.device = torch.device(\"cpu\")\n self.devices = [\n self.device for _ in range(config[\"num_gpus\"] or 1)\n ]\n self.model_gpu_towers = [\n model if i == 0 else copy.deepcopy(model)\n for i in range(config[\"num_gpus\"] or 1)\n ]\n self.model = model\n else:\n logger.info(\"TorchPolicy (worker={}) running on {} GPU(s).\".format(\n worker_idx if worker_idx > 0 else \"local\", config[\"num_gpus\"]))\n gpu_ids = ray.get_gpu_ids()\n self.devices = [\n torch.device(\"cuda:{}\".format(i))\n for i, id_ in enumerate(gpu_ids) if i < config[\"num_gpus\"]\n ]\n self.device = self.devices[0]\n ids = [\n id_ for i, id_ in enumerate(gpu_ids) if i < config[\"num_gpus\"]\n ]\n self.model_gpu_towers = []\n for i, _ in enumerate(ids):\n model_copy = copy.deepcopy(model)\n 
self.model_gpu_towers.append(model_copy.to(self.devices[i]))\n self.model = self.model_gpu_towers[0]\n\n # Lock used for locking some methods on the object-level.\n # This prevents possible race conditions when calling the model\n # first, then its value function (e.g. in a loss function), in\n # between of which another model call is made (e.g. to compute an\n # action).\n self._lock = threading.RLock()\n\n self._state_inputs = self.model.get_initial_state()\n self._is_recurrent = len(self._state_inputs) > 0\n # Auto-update model's inference view requirements, if recurrent.\n self._update_model_view_requirements_from_init_state()\n # Combine view_requirements for Model and Policy.\n self.view_requirements.update(self.model.view_requirements)\n\n self.exploration = self._create_exploration()\n self.unwrapped_model = model # used to support DistributedDataParallel\n self._loss = loss\n self._optimizers = force_list(self.optimizer())\n # Store, which params (by index within the model's list of\n # parameters) should be updated per optimizer.\n # Maps optimizer idx to set or param indices.\n self.multi_gpu_param_groups: List[Set[int]] = []\n main_params = {p: i for i, p in enumerate(self.model.parameters())}\n for o in self._optimizers:\n param_indices = []\n for pg_idx, pg in enumerate(o.param_groups):\n for p in pg[\"params\"]:\n param_indices.append(main_params[p])\n self.multi_gpu_param_groups.append(set(param_indices))\n\n self.dist_class = action_distribution_class\n self.action_sampler_fn = action_sampler_fn\n self.action_distribution_fn = action_distribution_fn\n\n # If set, means we are using distributed allreduce during learning.\n self.distributed_world_size = None\n\n self.max_seq_len = max_seq_len\n self.batch_divisibility_req = get_batch_divisibility_req(self) if \\\n callable(get_batch_divisibility_req) else \\\n (get_batch_divisibility_req or 1)\n\n @override(Policy)\n @DeveloperAPI\n def compute_actions(\n self,\n obs_batch: Union[List[TensorType], TensorType],\n state_batches: Optional[List[TensorType]] = None,\n prev_action_batch: Union[List[TensorType], TensorType] = None,\n prev_reward_batch: Union[List[TensorType], TensorType] = None,\n info_batch: Optional[Dict[str, list]] = None,\n episodes: Optional[List[\"MultiAgentEpisode\"]] = None,\n explore: Optional[bool] = None,\n timestep: Optional[int] = None,\n **kwargs) -> \\\n Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:\n\n with torch.no_grad():\n seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)\n input_dict = self._lazy_tensor_dict(\n SampleBatch({\n SampleBatch.CUR_OBS: np.asarray(obs_batch),\n }))\n if prev_action_batch is not None:\n input_dict[SampleBatch.PREV_ACTIONS] = \\\n np.asarray(prev_action_batch)\n if prev_reward_batch is not None:\n input_dict[SampleBatch.PREV_REWARDS] = \\\n np.asarray(prev_reward_batch)\n state_batches = [\n convert_to_torch_tensor(s, self.device)\n for s in (state_batches or [])\n ]\n return self._compute_action_helper(input_dict, state_batches,\n seq_lens, explore, timestep)\n\n @override(Policy)\n def compute_actions_from_input_dict(\n self,\n input_dict: Dict[str, TensorType],\n explore: bool = None,\n timestep: Optional[int] = None,\n **kwargs) -> \\\n Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:\n\n with torch.no_grad():\n # Pass lazy (torch) tensor dict to Model as `input_dict`.\n input_dict = self._lazy_tensor_dict(input_dict)\n # Pack internal state inputs into (separate) list.\n state_batches = [\n input_dict[k] for k in input_dict.keys() if 
\"state_in\" in k[:8]\n ]\n # Calculate RNN sequence lengths.\n seq_lens = np.array([1] * len(input_dict[\"obs\"])) \\\n if state_batches else None\n\n return self._compute_action_helper(input_dict, state_batches,\n seq_lens, explore, timestep)\n\n @with_lock\n def _compute_action_helper(self, input_dict, state_batches, seq_lens,\n explore, timestep):\n \"\"\"Shared forward pass logic (w/ and w/o trajectory view API).\n\n Returns:\n Tuple:\n - actions, state_out, extra_fetches, logp.\n \"\"\"\n explore = explore if explore is not None else self.config[\"explore\"]\n timestep = timestep if timestep is not None else self.global_timestep\n self._is_recurrent = state_batches is not None and state_batches != []\n\n # Switch to eval mode.\n if self.model:\n self.model.eval()\n\n if self.action_sampler_fn:\n action_dist = dist_inputs = None\n actions, logp, state_out = self.action_sampler_fn(\n self,\n self.model,\n input_dict,\n state_batches,\n explore=explore,\n timestep=timestep)\n else:\n # Call the exploration before_compute_actions hook.\n self.exploration.before_compute_actions(\n explore=explore, timestep=timestep)\n if self.action_distribution_fn:\n # Try new action_distribution_fn signature, supporting\n # state_batches and seq_lens.\n try:\n dist_inputs, dist_class, state_out = \\\n self.action_distribution_fn(\n self,\n self.model,\n input_dict=input_dict,\n state_batches=state_batches,\n seq_lens=seq_lens,\n explore=explore,\n timestep=timestep,\n is_training=False)\n # Trying the old way (to stay backward compatible).\n # TODO: Remove in future.\n except TypeError as e:\n if \"positional argument\" in e.args[0] or \\\n \"unexpected keyword argument\" in e.args[0]:\n dist_inputs, dist_class, state_out = \\\n self.action_distribution_fn(\n self,\n self.model,\n input_dict[SampleBatch.CUR_OBS],\n explore=explore,\n timestep=timestep,\n is_training=False)\n else:\n raise e\n else:\n dist_class = self.dist_class\n dist_inputs, state_out = self.model(input_dict, state_batches,\n seq_lens)\n\n if not (isinstance(dist_class, functools.partial)\n or issubclass(dist_class, TorchDistributionWrapper)):\n raise ValueError(\n \"`dist_class` ({}) not a TorchDistributionWrapper \"\n \"subclass! 
Make sure your `action_distribution_fn` or \"\n \"`make_model_and_action_dist` return a correct \"\n \"distribution class.\".format(dist_class.__name__))\n action_dist = dist_class(dist_inputs, self.model)\n\n # Get the exploration action from the forward results.\n actions, logp = \\\n self.exploration.get_exploration_action(\n action_distribution=action_dist,\n timestep=timestep,\n explore=explore)\n\n input_dict[SampleBatch.ACTIONS] = actions\n\n # Add default and custom fetches.\n extra_fetches = self.extra_action_out(input_dict, state_batches,\n self.model, action_dist)\n\n # Action-dist inputs.\n if dist_inputs is not None:\n extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs\n\n # Action-logp and action-prob.\n if logp is not None:\n extra_fetches[SampleBatch.ACTION_PROB] = \\\n torch.exp(logp.float())\n extra_fetches[SampleBatch.ACTION_LOGP] = logp\n\n # Update our global timestep by the batch size.\n self.global_timestep += len(input_dict[SampleBatch.CUR_OBS])\n\n return convert_to_non_torch_type((actions, state_out, extra_fetches))\n\n @with_lock\n @override(Policy)\n @DeveloperAPI\n def compute_log_likelihoods(\n self,\n actions: Union[List[TensorType], TensorType],\n obs_batch: Union[List[TensorType], TensorType],\n state_batches: Optional[List[TensorType]] = None,\n prev_action_batch: Optional[Union[List[TensorType],\n TensorType]] = None,\n prev_reward_batch: Optional[Union[List[\n TensorType], TensorType]] = None) -> TensorType:\n\n if self.action_sampler_fn and self.action_distribution_fn is None:\n raise ValueError(\"Cannot compute log-prob/likelihood w/o an \"\n \"`action_distribution_fn` and a provided \"\n \"`action_sampler_fn`!\")\n\n with torch.no_grad():\n input_dict = self._lazy_tensor_dict({\n SampleBatch.CUR_OBS: obs_batch,\n SampleBatch.ACTIONS: actions\n })\n if prev_action_batch is not None:\n input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch\n if prev_reward_batch is not None:\n input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch\n seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)\n state_batches = [\n convert_to_torch_tensor(s, self.device)\n for s in (state_batches or [])\n ]\n\n # Exploration hook before each forward pass.\n self.exploration.before_compute_actions(explore=False)\n\n # Action dist class and inputs are generated via custom function.\n if self.action_distribution_fn:\n\n # Try new action_distribution_fn signature, supporting\n # state_batches and seq_lens.\n try:\n dist_inputs, dist_class, state_out = \\\n self.action_distribution_fn(\n self,\n self.model,\n input_dict=input_dict,\n state_batches=state_batches,\n seq_lens=seq_lens,\n explore=False,\n is_training=False)\n # Trying the old way (to stay backward compatible).\n # TODO: Remove in future.\n except TypeError as e:\n if \"positional argument\" in e.args[0] or \\\n \"unexpected keyword argument\" in e.args[0]:\n dist_inputs, dist_class, _ = \\\n self.action_distribution_fn(\n policy=self,\n model=self.model,\n obs_batch=input_dict[SampleBatch.CUR_OBS],\n explore=False,\n is_training=False)\n else:\n raise e\n\n # Default action-dist inputs calculation.\n else:\n dist_class = self.dist_class\n dist_inputs, _ = self.model(input_dict, state_batches,\n seq_lens)\n\n action_dist = dist_class(dist_inputs, self.model)\n log_likelihoods = action_dist.logp(input_dict[SampleBatch.ACTIONS])\n\n return log_likelihoods\n\n @with_lock\n @override(Policy)\n @DeveloperAPI\n def learn_on_batch(\n self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:\n\n # Set 
Model to train mode.\n if self.model:\n self.model.train()\n # Callback handling.\n learn_stats = {}\n self.callbacks.on_learn_on_batch(\n policy=self, train_batch=postprocessed_batch, result=learn_stats)\n\n # Compute gradients (will calculate all losses and `backward()`\n # them to get the grads).\n grads, fetches = self.compute_gradients(postprocessed_batch)\n\n # Step the optimizers.\n self.apply_gradients(_directStepOptimizerSingleton)\n\n if self.model:\n fetches[\"model\"] = self.model.metrics()\n fetches.update({\"custom_metrics\": learn_stats})\n\n return fetches\n\n @with_lock\n @override(Policy)\n @DeveloperAPI\n def compute_gradients(self,\n postprocessed_batch: SampleBatch) -> ModelGradients:\n\n # For multi-GPU, split the batch into n slices (n=#GPUs).\n if len(self.devices) == 1:\n batches = [postprocessed_batch]\n else:\n from ray.rllib.utils.sgd import minibatches\n batches = list(\n minibatches(\n postprocessed_batch,\n len(postprocessed_batch) // len(self.devices),\n shuffle=False))\n\n if not isinstance(postprocessed_batch, SampleBatch) or \\\n not postprocessed_batch.zero_padded:\n for b in batches:\n pad_batch_to_sequences_of_same_size(\n b,\n max_seq_len=self.max_seq_len,\n shuffle=False,\n batch_divisibility_req=self.batch_divisibility_req,\n view_requirements=self.view_requirements,\n )\n\n for b, d in zip(batches, self.devices):\n b.is_training = True\n self._lazy_tensor_dict(b, device=d)\n\n # Multi-GPU case: Slice inputs into n (roughly) equal batches.\n if len(self.devices) > 1:\n # Copy weights of main model to all towers.\n state_dict = self.model.state_dict()\n for tower in self.model_gpu_towers:\n tower.load_state_dict(state_dict)\n\n # Do the (maybe parallelized) gradient calculation step.\n tower_outputs = self._multi_gpu_parallel_grad_calc(batches)\n\n # Multi device (GPU) case.\n if len(self.devices) > 1:\n # Mean-reduce over GPU-towers.\n all_grads = []\n for i in range(len(tower_outputs[0][0])):\n if tower_outputs[0][0][i] is not None:\n all_grads.append(\n torch.mean(\n torch.stack([\n t[0][i].to(self.device) for t in tower_outputs\n ]),\n dim=0))\n else:\n all_grads.append(None)\n # Set main model's grads to mean-reduced values.\n for i, p in enumerate(self.model.parameters()):\n p.grad = all_grads[i]\n # Reduce stats over towers as well.\n from ray.rllib.execution.train_ops import all_tower_reduce\n grad_info = tree.map_structure_with_path(\n lambda p, *t: all_tower_reduce(p, *t),\n *[t[1] for t in tower_outputs])\n # Single device case.\n else:\n all_grads, grad_info = tower_outputs[0]\n\n grad_info[\"allreduce_latency\"] /= len(self._optimizers)\n grad_info.update(self.extra_grad_info(postprocessed_batch))\n\n fetches = self.extra_compute_grad_fetches()\n\n return all_grads, dict(fetches, **{LEARNER_STATS_KEY: grad_info})\n\n @override(Policy)\n @DeveloperAPI\n def apply_gradients(self, gradients: ModelGradients) -> None:\n if gradients == _directStepOptimizerSingleton:\n for i, opt in enumerate(self._optimizers):\n opt.step()\n else:\n # TODO(sven): Not supported for multiple optimizers yet.\n assert len(self._optimizers) == 1\n for g, p in zip(gradients, self.model.parameters()):\n if g is not None:\n if torch.is_tensor(g):\n p.grad = g.to(self.device)\n else:\n p.grad = torch.from_numpy(g).to(self.device)\n\n self._optimizers[0].step()\n\n @override(Policy)\n @DeveloperAPI\n def get_weights(self) -> ModelWeights:\n return {\n k: v.cpu().detach().numpy()\n for k, v in self.model.state_dict().items()\n }\n\n @override(Policy)\n @DeveloperAPI\n 
def set_weights(self, weights: ModelWeights) -> None:\n weights = convert_to_torch_tensor(weights, device=self.device)\n self.model.load_state_dict(weights)\n\n @override(Policy)\n @DeveloperAPI\n def is_recurrent(self) -> bool:\n return self._is_recurrent\n\n @override(Policy)\n @DeveloperAPI\n def num_state_tensors(self) -> int:\n return len(self.model.get_initial_state())\n\n @override(Policy)\n @DeveloperAPI\n def get_initial_state(self) -> List[TensorType]:\n return [\n s.detach().cpu().numpy() for s in self.model.get_initial_state()\n ]\n\n @override(Policy)\n @DeveloperAPI\n def get_state(self) -> Union[Dict[str, TensorType], List[TensorType]]:\n state = super().get_state()\n state[\"_optimizer_variables\"] = []\n for i, o in enumerate(self._optimizers):\n optim_state_dict = convert_to_non_torch_type(o.state_dict())\n state[\"_optimizer_variables\"].append(optim_state_dict)\n return state\n\n @override(Policy)\n @DeveloperAPI\n def set_state(self, state: object) -> None:\n state = state.copy() # shallow copy\n # Set optimizer vars first.\n optimizer_vars = state.pop(\"_optimizer_variables\", None)\n if optimizer_vars:\n assert len(optimizer_vars) == len(self._optimizers)\n for o, s in zip(self._optimizers, optimizer_vars):\n optim_state_dict = convert_to_torch_tensor(\n s, device=self.device)\n o.load_state_dict(optim_state_dict)\n # Then the Policy's (NN) weights.\n super().set_state(state)\n\n @DeveloperAPI\n def extra_grad_process(self, optimizer: \"torch.optim.Optimizer\",\n loss: TensorType):\n \"\"\"Called after each optimizer.zero_grad() + loss.backward() call.\n\n Called for each self._optimizers/loss-value pair.\n Allows for gradient processing before optimizer.step() is called.\n E.g. for gradient clipping.\n\n Args:\n optimizer (torch.optim.Optimizer): A torch optimizer object.\n loss (TensorType): The loss tensor associated with the optimizer.\n\n Returns:\n Dict[str, TensorType]: An dict with information on the gradient\n processing step.\n \"\"\"\n return {}\n\n @DeveloperAPI\n def extra_compute_grad_fetches(self) -> Dict[str, any]:\n \"\"\"Extra values to fetch and return from compute_gradients().\n\n Returns:\n Dict[str, any]: Extra fetch dict to be added to the fetch dict\n of the compute_gradients call.\n \"\"\"\n return {LEARNER_STATS_KEY: {}} # e.g, stats, td error, etc.\n\n @DeveloperAPI\n def extra_action_out(\n self, input_dict: Dict[str, TensorType],\n state_batches: List[TensorType], model: TorchModelV2,\n action_dist: TorchDistributionWrapper) -> Dict[str, TensorType]:\n \"\"\"Returns dict of extra info to include in experience batch.\n\n Args:\n input_dict (Dict[str, TensorType]): Dict of model input tensors.\n state_batches (List[TensorType]): List of state tensors.\n model (TorchModelV2): Reference to the model object.\n action_dist (TorchDistributionWrapper): Torch action dist object\n to get log-probs (e.g. 
for already sampled actions).\n\n Returns:\n Dict[str, TensorType]: Extra outputs to return in a\n compute_actions() call (3rd return value).\n \"\"\"\n return {}\n\n @DeveloperAPI\n def extra_grad_info(self,\n train_batch: SampleBatch) -> Dict[str, TensorType]:\n \"\"\"Return dict of extra grad info.\n\n Args:\n train_batch (SampleBatch): The training batch for which to produce\n extra grad info for.\n\n Returns:\n Dict[str, TensorType]: The info dict carrying grad info per str\n key.\n \"\"\"\n return {}\n\n @DeveloperAPI\n def optimizer(\n self\n ) -> Union[List[\"torch.optim.Optimizer\"], \"torch.optim.Optimizer\"]:\n \"\"\"Custom the local PyTorch optimizer(s) to use.\n\n Returns:\n Union[List[torch.optim.Optimizer], torch.optim.Optimizer]:\n The local PyTorch optimizer(s) to use for this Policy.\n \"\"\"\n if hasattr(self, \"config\"):\n return torch.optim.Adam(\n self.model.parameters(), lr=self.config[\"lr\"])\n else:\n return torch.optim.Adam(self.model.parameters())\n\n @override(Policy)\n @DeveloperAPI\n def export_model(self, export_dir: str) -> None:\n \"\"\"Exports the Policy's Model to local directory for serving.\n\n Creates a TorchScript model and saves it.\n\n Args:\n export_dir (str): Local writable directory or filename.\n \"\"\"\n self._lazy_tensor_dict(self._dummy_batch)\n # Provide dummy state inputs if not an RNN (torch cannot jit with\n # returned empty internal states list).\n if \"state_in_0\" not in self._dummy_batch:\n self._dummy_batch[\"state_in_0\"] = \\\n self._dummy_batch[\"seq_lens\"] = np.array([1.0])\n seq_lens = self._dummy_batch[\"seq_lens\"]\n\n state_ins = []\n i = 0\n while \"state_in_{}\".format(i) in self._dummy_batch:\n state_ins.append(self._dummy_batch[\"state_in_{}\".format(i)])\n i += 1\n dummy_inputs = {\n k: self._dummy_batch[k]\n for k in self._dummy_batch.keys() if k != \"is_training\"\n }\n traced = torch.jit.trace(self.model,\n (dummy_inputs, state_ins, seq_lens))\n if not os.path.exists(export_dir):\n os.makedirs(export_dir)\n file_name = os.path.join(export_dir, \"model.pt\")\n traced.save(file_name)\n\n @override(Policy)\n @DeveloperAPI\n def export_checkpoint(self, export_dir: str) -> None:\n \"\"\"TODO(sven): implement for torch.\n \"\"\"\n raise NotImplementedError\n\n @override(Policy)\n @DeveloperAPI\n def import_model_from_h5(self, import_file: str) -> None:\n \"\"\"Imports weights into torch model.\"\"\"\n return self.model.import_from_h5(import_file)\n\n def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None):\n # TODO: (sven): Keep for a while to ensure backward compatibility.\n if not isinstance(postprocessed_batch, SampleBatch):\n postprocessed_batch = SampleBatch(postprocessed_batch)\n postprocessed_batch.set_get_interceptor(\n functools.partial(\n convert_to_torch_tensor, device=device or self.device))\n return postprocessed_batch\n\n def _multi_gpu_parallel_grad_calc(self, sample_batches):\n \"\"\"Performs a parallelized loss and gradient calculation over the batch.\n\n Splits up the given train batch into n shards (n=number of this\n Policy's devices) and passes each data shard (in parallel) through\n the loss function using the individual devices' models\n (self.model_gpu_towers). 
Then returns each tower's outputs.\n\n Args:\n sample_batches (List[SampleBatch]): A list of SampleBatch shards to\n calculate loss and gradients for.\n\n Returns:\n List[Tuple[List[TensorType], StatsDict]]: A list (one item per\n device) of 2-tuples with 1) gradient list and 2) stats dict.\n \"\"\"\n assert len(self.model_gpu_towers) == len(sample_batches)\n lock = threading.Lock()\n results = {}\n grad_enabled = torch.is_grad_enabled()\n\n def _worker(shard_idx, model, sample_batch, device):\n torch.set_grad_enabled(grad_enabled)\n try:\n with NullContextManager(\n ) if device.type == \"cpu\" else torch.cuda.device(device):\n loss_out = force_list(\n self._loss(self, model, self.dist_class, sample_batch))\n\n # Call Model's custom-loss with Policy loss outputs and\n # train_batch.\n loss_out = model.custom_loss(loss_out, sample_batch)\n\n assert len(loss_out) == len(self._optimizers)\n\n # Loop through all optimizers.\n grad_info = {\"allreduce_latency\": 0.0}\n\n parameters = list(model.parameters())\n all_grads = [None for _ in range(len(parameters))]\n for opt_idx, opt in enumerate(self._optimizers):\n # Erase gradients in all vars of the tower that this\n # optimizer would affect.\n param_indices = self.multi_gpu_param_groups[opt_idx]\n for param_idx, param in enumerate(parameters):\n if param_idx in param_indices and \\\n param.grad is not None:\n param.grad.data.zero_()\n # Recompute gradients of loss over all variables.\n loss_out[opt_idx].backward(retain_graph=True)\n grad_info.update(\n self.extra_grad_process(opt, loss_out[opt_idx]))\n\n grads = []\n # Note that return values are just references;\n # Calling zero_grad would modify the values.\n for param_idx, param in enumerate(parameters):\n if param_idx in param_indices:\n if param.grad is not None:\n grads.append(param.grad)\n all_grads[param_idx] = param.grad\n\n if self.distributed_world_size:\n start = time.time()\n if torch.cuda.is_available():\n # Sadly, allreduce_coalesced does not work with\n # CUDA yet.\n for g in grads:\n torch.distributed.all_reduce(\n g, op=torch.distributed.ReduceOp.SUM)\n else:\n torch.distributed.all_reduce_coalesced(\n grads, op=torch.distributed.ReduceOp.SUM)\n\n for param_group in opt.param_groups:\n for p in param_group[\"params\"]:\n if p.grad is not None:\n p.grad /= self.distributed_world_size\n\n grad_info[\n \"allreduce_latency\"] += time.time() - start\n\n with lock:\n results[shard_idx] = (all_grads, grad_info)\n except Exception as e:\n with lock:\n results[shard_idx] = ValueError(\n e.args[0] + \"\\n\" +\n \"In tower {} on device {}\".format(shard_idx, device))\n\n # Single device (GPU) or fake-GPU case (serialize for better\n # debugging).\n if len(self.devices) == 1 or self.config[\"_fake_gpus\"]:\n for shard_idx, (model, sample_batch, device) in enumerate(\n zip(self.model_gpu_towers, sample_batches, self.devices)):\n _worker(shard_idx, model, sample_batch, device)\n # Raise errors right away for better debugging.\n last_result = results[len(results) - 1]\n if isinstance(last_result, ValueError):\n raise last_result\n # Multi device (GPU) case: Parallelize via threads.\n else:\n threads = [\n threading.Thread(\n target=_worker,\n args=(shard_idx, model, sample_batch, device))\n for shard_idx, (model, sample_batch, device) in enumerate(\n zip(self.model_gpu_towers, sample_batches, self.devices))\n ]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n # Gather all threads' outputs and return.\n outputs = []\n for shard_idx in 
range(len(sample_batches)):\n output = results[shard_idx]\n if isinstance(output, Exception):\n raise output\n outputs.append(results[shard_idx])\n return outputs\n\n\n# TODO: (sven) Unify hyperparam annealing procedures across RLlib (tf/torch)\n# and for all possible hyperparams, not just lr.\n@DeveloperAPI\nclass LearningRateSchedule:\n \"\"\"Mixin for TFPolicy that adds a learning rate schedule.\"\"\"\n\n @DeveloperAPI\n def __init__(self, lr, lr_schedule):\n self._lr_schedule = None\n if lr_schedule is None:\n self.cur_lr = lr\n else:\n self._lr_schedule = PiecewiseSchedule(\n lr_schedule, outside_value=lr_schedule[-1][-1], framework=None)\n self.cur_lr = self._lr_schedule.value(0)\n\n @override(Policy)\n def on_global_var_update(self, global_vars):\n super().on_global_var_update(global_vars)\n if self._lr_schedule:\n self.cur_lr = self._lr_schedule.value(global_vars[\"timestep\"])\n for opt in self._optimizers:\n for p in opt.param_groups:\n p[\"lr\"] = self.cur_lr\n\n\n@DeveloperAPI\nclass EntropyCoeffSchedule:\n \"\"\"Mixin for TorchPolicy that adds entropy coeff decay.\"\"\"\n\n @DeveloperAPI\n def __init__(self, entropy_coeff, entropy_coeff_schedule):\n self._entropy_coeff_schedule = None\n if entropy_coeff_schedule is None:\n self.entropy_coeff = entropy_coeff\n else:\n # Allows for custom schedule similar to lr_schedule format\n if isinstance(entropy_coeff_schedule, list):\n self._entropy_coeff_schedule = PiecewiseSchedule(\n entropy_coeff_schedule,\n outside_value=entropy_coeff_schedule[-1][-1],\n framework=None)\n else:\n # Implements previous version but enforces outside_value\n self._entropy_coeff_schedule = PiecewiseSchedule(\n [[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],\n outside_value=0.0,\n framework=None)\n self.entropy_coeff = self._entropy_coeff_schedule.value(0)\n\n @override(Policy)\n def on_global_var_update(self, global_vars):\n super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)\n if self._entropy_coeff_schedule is not None:\n self.entropy_coeff = self._entropy_coeff_schedule.value(\n global_vars[\"timestep\"])\n\n\n@DeveloperAPI\nclass DirectStepOptimizer:\n \"\"\"Typesafe method for indicating apply gradients can directly step the\n optimizers with in-place gradients.\n \"\"\"\n _instance = None\n\n def __new__(cls):\n if DirectStepOptimizer._instance is None:\n DirectStepOptimizer._instance = super().__new__(cls)\n return DirectStepOptimizer._instance\n\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __repr__(self):\n return \"DirectStepOptimizer\"\n\n\n_directStepOptimizerSingleton = DirectStepOptimizer()\n", "import logging\nimport os\n\nimport numpy as np\n\nimport ray.ray_constants as ray_constants\n\nlogger = logging.getLogger(__name__)\n\n\nclass RayParams:\n \"\"\"A class used to store the parameters used by Ray.\n\n Attributes:\n redis_address (str): The address of the Redis server to connect to. If\n this address is not provided, then this command will start Redis, a\n raylet, a plasma store, a plasma manager, and some workers.\n It will also kill these processes when Python exits.\n redis_port (int): The port that the primary Redis shard should listen\n to. If None, then it will fall back to\n ray.ray_constants.DEFAULT_PORT, or a random port if the default is\n not available.\n redis_shard_ports: A list of the ports to use for the non-primary Redis\n shards. 
If None, then it will fall back to the ports right after\n redis_port, or random ports if those are not available.\n num_cpus (int): Number of CPUs to configure the raylet with.\n num_gpus (int): Number of GPUs to configure the raylet with.\n resources: A dictionary mapping the name of a resource to the quantity\n of that resource available.\n memory: Total available memory for workers requesting memory.\n object_store_memory: The amount of memory (in bytes) to start the\n object store with.\n redis_max_memory: The max amount of memory (in bytes) to allow redis\n to use, or None for no limit. Once the limit is exceeded, redis\n will start LRU eviction of entries. This only applies to the\n sharded redis tables (task and object tables).\n object_manager_port int: The port to use for the object manager.\n node_manager_port: The port to use for the node manager.\n gcs_server_port: The port to use for the GCS server.\n node_ip_address (str): The IP address of the node that we are on.\n raylet_ip_address (str): The IP address of the raylet that this node\n connects to.\n min_worker_port (int): The lowest port number that workers will bind\n on. If not set or set to 0, random ports will be chosen.\n max_worker_port (int): The highest port number that workers will bind\n on. If set, min_worker_port must also be set.\n worker_port_list (str): An explicit list of ports to be used for\n workers (comma-separated). Overrides min_worker_port and\n max_worker_port.\n ray_client_server_port (int): The port number the ray client server\n will bind on. If not set, the ray client server will not\n be started.\n object_ref_seed (int): Used to seed the deterministic generation of\n object refs. The same value can be used across multiple runs of the\n same job in order to generate the object refs in a consistent\n manner. However, the same ID should not be used for different jobs.\n redirect_worker_output: True if the stdout and stderr of worker\n processes should be redirected to files.\n redirect_output (bool): True if stdout and stderr for non-worker\n processes should be redirected to files and false otherwise.\n num_redis_shards: The number of Redis shards to start in addition to\n the primary Redis shard.\n redis_max_clients: If provided, attempt to configure Redis with this\n maxclients number.\n redis_password (str): Prevents external clients without the password\n from connecting to Redis if provided.\n plasma_directory: A directory where the Plasma memory mapped files will\n be created.\n worker_path (str): The path of the source code that will be run by the\n worker.\n setup_worker_path (str): The path of the Python file that will run\n worker_setup_hook to set up the environment for the worker process.\n worker_setup_hook (str): The module path to a Python function that will\n be imported and run to set up the environment for the worker.\n huge_pages: Boolean flag indicating whether to start the Object\n Store with hugetlbfs support. Requires plasma_directory.\n include_dashboard: Boolean flag indicating whether to start the web\n UI, which displays the status of the Ray cluster. If this value is\n None, then the UI will be started if the relevant dependencies are\n present.\n dashboard_host: The host to bind the web UI server to. 
Can either be\n localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).\n By default, this is set to localhost to prevent access from\n external machines.\n dashboard_port: The port to bind the dashboard server to.\n Defaults to 8265.\n logging_level: Logging level, default will be logging.INFO.\n logging_format: Logging format, default contains a timestamp,\n filename, line number, and message. See ray_constants.py.\n plasma_store_socket_name (str): If provided, it will specify the socket\n name used by the plasma store.\n raylet_socket_name (str): If provided, it will specify the socket path\n used by the raylet process.\n temp_dir (str): If provided, it will specify the root temporary\n directory for the Ray process.\n include_log_monitor (bool): If True, then start a log monitor to\n monitor the log files for all processes on this node and push their\n contents to Redis.\n autoscaling_config: path to autoscaling config file.\n metrics_agent_port(int): The port to bind metrics agent.\n metrics_export_port(int): The port at which metrics are exposed\n through a Prometheus endpoint.\n no_monitor(bool): If True, the ray autoscaler monitor for this cluster\n will not be started.\n _system_config (dict): Configuration for overriding RayConfig\n defaults. Used to set system configuration and for experimental Ray\n core feature flags.\n enable_object_reconstruction (bool): Enable plasma reconstruction on\n failure.\n start_initial_python_workers_for_first_job (bool): If true, start\n initial Python workers for the first job on the node.\n \"\"\"\n\n def __init__(self,\n redis_address=None,\n num_cpus=None,\n num_gpus=None,\n resources=None,\n memory=None,\n object_store_memory=None,\n redis_max_memory=None,\n redis_port=None,\n redis_shard_ports=None,\n object_manager_port=None,\n node_manager_port=0,\n gcs_server_port=None,\n node_ip_address=None,\n raylet_ip_address=None,\n min_worker_port=None,\n max_worker_port=None,\n worker_port_list=None,\n ray_client_server_port=None,\n object_ref_seed=None,\n driver_mode=None,\n redirect_worker_output=None,\n redirect_output=None,\n num_redis_shards=None,\n redis_max_clients=None,\n redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,\n plasma_directory=None,\n worker_path=None,\n setup_worker_path=None,\n worker_setup_hook=ray_constants.DEFAULT_WORKER_SETUP_HOOK,\n huge_pages=False,\n include_dashboard=None,\n dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,\n dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,\n logging_level=logging.INFO,\n logging_format=ray_constants.LOGGER_FORMAT,\n plasma_store_socket_name=None,\n raylet_socket_name=None,\n temp_dir=None,\n include_log_monitor=None,\n autoscaling_config=None,\n start_initial_python_workers_for_first_job=False,\n _system_config=None,\n enable_object_reconstruction=False,\n metrics_agent_port=None,\n metrics_export_port=None,\n tracing_startup_hook=None,\n no_monitor=False,\n lru_evict=False):\n self.object_ref_seed = object_ref_seed\n self.redis_address = redis_address\n self.num_cpus = num_cpus\n self.num_gpus = num_gpus\n self.memory = memory\n self.object_store_memory = object_store_memory\n self.resources = resources\n self.redis_max_memory = redis_max_memory\n self.redis_port = redis_port\n self.redis_shard_ports = redis_shard_ports\n self.object_manager_port = object_manager_port\n self.node_manager_port = node_manager_port\n self.gcs_server_port = gcs_server_port\n self.node_ip_address = node_ip_address\n self.raylet_ip_address = raylet_ip_address\n self.min_worker_port = 
min_worker_port\n self.max_worker_port = max_worker_port\n self.worker_port_list = worker_port_list\n self.ray_client_server_port = ray_client_server_port\n self.driver_mode = driver_mode\n self.redirect_worker_output = redirect_worker_output\n self.redirect_output = redirect_output\n self.num_redis_shards = num_redis_shards\n self.redis_max_clients = redis_max_clients\n self.redis_password = redis_password\n self.plasma_directory = plasma_directory\n self.worker_path = worker_path\n self.setup_worker_path = setup_worker_path\n self.worker_setup_hook = worker_setup_hook\n self.huge_pages = huge_pages\n self.include_dashboard = include_dashboard\n self.dashboard_host = dashboard_host\n self.dashboard_port = dashboard_port\n self.plasma_store_socket_name = plasma_store_socket_name\n self.raylet_socket_name = raylet_socket_name\n self.temp_dir = temp_dir\n self.include_log_monitor = include_log_monitor\n self.autoscaling_config = autoscaling_config\n self.metrics_agent_port = metrics_agent_port\n self.metrics_export_port = metrics_export_port\n self.tracing_startup_hook = tracing_startup_hook\n self.no_monitor = no_monitor\n self.start_initial_python_workers_for_first_job = (\n start_initial_python_workers_for_first_job)\n self._system_config = _system_config or {}\n self._enable_object_reconstruction = enable_object_reconstruction\n self._check_usage()\n\n # Set the internal config options for LRU eviction.\n if lru_evict:\n raise DeprecationWarning(\n \"The lru_evict flag is deprecated as Ray natively \"\n \"supports object spilling. Please read \"\n \"https://docs.ray.io/en/master/memory-management.html#object-spilling \" # noqa\n \"for more details.\")\n\n # Set the internal config options for object reconstruction.\n if enable_object_reconstruction:\n # Turn off object pinning.\n if self._system_config is None:\n self._system_config = dict()\n print(self._system_config)\n self._system_config[\"lineage_pinning_enabled\"] = True\n self._system_config[\"free_objects_period_milliseconds\"] = -1\n\n def update(self, **kwargs):\n \"\"\"Update the settings according to the keyword arguments.\n\n Args:\n kwargs: The keyword arguments to set corresponding fields.\n \"\"\"\n for arg in kwargs:\n if hasattr(self, arg):\n setattr(self, arg, kwargs[arg])\n else:\n raise ValueError(\n f\"Invalid RayParams parameter in update: {arg}\")\n\n self._check_usage()\n\n def update_if_absent(self, **kwargs):\n \"\"\"Update the settings when the target fields are None.\n\n Args:\n kwargs: The keyword arguments to set corresponding fields.\n \"\"\"\n for arg in kwargs:\n if hasattr(self, arg):\n if getattr(self, arg) is None:\n setattr(self, arg, kwargs[arg])\n else:\n raise ValueError(\"Invalid RayParams parameter in\"\n \" update_if_absent: %s\" % arg)\n\n self._check_usage()\n\n def update_pre_selected_port(self):\n \"\"\"Update the pre-selected port information\n\n Returns:\n The dictionary mapping of component -> ports.\n \"\"\"\n\n def wrap_port(port):\n # 0 port means select a random port for the grpc server.\n if port is None or port == 0:\n return []\n else:\n return [port]\n\n # Create a dictionary of the component -> port mapping.\n pre_selected_ports = {\n \"gcs\": wrap_port(self.redis_port),\n \"object_manager\": wrap_port(self.object_manager_port),\n \"node_manager\": wrap_port(self.node_manager_port),\n \"gcs_server\": wrap_port(self.gcs_server_port),\n \"client_server\": wrap_port(self.ray_client_server_port),\n \"dashboard\": wrap_port(self.dashboard_port),\n \"dashboard_agent\": 
wrap_port(self.metrics_agent_port),\n \"metrics_export\": wrap_port(self.metrics_export_port),\n }\n redis_shard_ports = self.redis_shard_ports\n if redis_shard_ports is None:\n redis_shard_ports = []\n pre_selected_ports[\"redis_shards\"] = redis_shard_ports\n if self.worker_port_list is None:\n if (self.min_worker_port is not None\n and self.max_worker_port is not None):\n pre_selected_ports[\"worker_ports\"] = list(\n range(self.min_worker_port, self.max_worker_port + 1))\n else:\n # The dict is not updated when it requires random ports.\n pre_selected_ports[\"worker_ports\"] = []\n else:\n pre_selected_ports[\"worker_ports\"] = [\n int(port) for port in self.worker_port_list.split(\",\")\n ]\n\n # Update the pre selected port set.\n self.reserved_ports = set()\n for comp, port_list in pre_selected_ports.items():\n for port in port_list:\n if port in self.reserved_ports:\n raise ValueError(\n f\"Ray component {comp} is trying to use \"\n f\"a port number {port} that is used by \"\n \"other components.\\n\"\n f\"Port information: {pre_selected_ports}\\n\"\n \"If you allocate ports, \"\n \"please make sure the same port is not used by \"\n \"multiple components.\")\n self.reserved_ports.add(port)\n\n def _check_usage(self):\n if self.worker_port_list is not None:\n for port_str in self.worker_port_list.split(\",\"):\n try:\n port = int(port_str)\n except ValueError as e:\n raise ValueError(\n \"worker_port_list must be a comma-separated \" +\n \"list of integers: {}\".format(e)) from None\n\n if port < 1024 or port > 65535:\n raise ValueError(\n \"Ports in worker_port_list must be \"\n \"between 1024 and 65535. Got: {}\".format(port))\n\n # Used primarily for testing.\n if os.environ.get(\"RAY_USE_RANDOM_PORTS\", False):\n if self.min_worker_port is None and self.max_worker_port is None:\n self.min_worker_port = 0\n self.max_worker_port = 0\n\n if self.min_worker_port is not None:\n if self.min_worker_port != 0 and (self.min_worker_port < 1024\n or self.min_worker_port > 65535):\n raise ValueError(\"min_worker_port must be 0 or an integer \"\n \"between 1024 and 65535.\")\n\n if self.max_worker_port is not None:\n if self.min_worker_port is None:\n raise ValueError(\"If max_worker_port is set, min_worker_port \"\n \"must also be set.\")\n elif self.max_worker_port != 0:\n if self.max_worker_port < 1024 or self.max_worker_port > 65535:\n raise ValueError(\n \"max_worker_port must be 0 or an integer between \"\n \"1024 and 65535.\")\n elif self.max_worker_port <= self.min_worker_port:\n raise ValueError(\"max_worker_port must be higher than \"\n \"min_worker_port.\")\n\n if self.ray_client_server_port is not None:\n if (self.ray_client_server_port < 1024\n or self.ray_client_server_port > 65535):\n raise ValueError(\"ray_client_server_port must be an integer \"\n \"between 1024 and 65535.\")\n\n if self.resources is not None:\n assert \"CPU\" not in self.resources, (\n \"'CPU' should not be included in the resource dictionary. Use \"\n \"num_cpus instead.\")\n assert \"GPU\" not in self.resources, (\n \"'GPU' should not be included in the resource dictionary. Use \"\n \"num_gpus instead.\")\n\n if self.redirect_worker_output is not None:\n raise DeprecationWarning(\n \"The redirect_worker_output argument is deprecated. 
To \"\n \"control logging to the driver, use the 'log_to_driver' \"\n \"argument to 'ray.init()'\")\n\n if self.redirect_output is not None:\n raise DeprecationWarning(\n \"The redirect_output argument is deprecated.\")\n\n # Parse the numpy version.\n numpy_version = np.__version__.split(\".\")\n numpy_major, numpy_minor = int(numpy_version[0]), int(numpy_version[1])\n if numpy_major <= 1 and numpy_minor < 16:\n logger.warning(\"Using ray with numpy < 1.16.0 will result in slow \"\n \"serialization. Upgrade numpy if using with ray.\")\n", "from collections import defaultdict\nimport numpy as np\nimport random\nfrom typing import List, Dict, Callable, Any, TYPE_CHECKING\n\nfrom ray.rllib.env.base_env import _DUMMY_AGENT_ID\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.utils.annotations import DeveloperAPI\nfrom ray.rllib.utils.spaces.space_utils import flatten_to_single_ndarray\nfrom ray.rllib.utils.typing import SampleBatchType, AgentID, PolicyID, \\\n EnvActionType, EnvID, EnvInfoDict, EnvObsType\n\nif TYPE_CHECKING:\n from ray.rllib.evaluation.sample_batch_builder import \\\n MultiAgentSampleBatchBuilder\n\n\n@DeveloperAPI\nclass MultiAgentEpisode:\n \"\"\"Tracks the current state of a (possibly multi-agent) episode.\n\n Attributes:\n new_batch_builder (func): Create a new MultiAgentSampleBatchBuilder.\n add_extra_batch (func): Return a built MultiAgentBatch to the sampler.\n batch_builder (obj): Batch builder for the current episode.\n total_reward (float): Summed reward across all agents in this episode.\n length (int): Length of this episode.\n episode_id (int): Unique id identifying this trajectory.\n agent_rewards (dict): Summed rewards broken down by agent.\n custom_metrics (dict): Dict where the you can add custom metrics.\n user_data (dict): Dict that you can use for temporary storage. E.g.\n in between two custom callbacks referring to the same episode.\n hist_data (dict): Dict mapping str keys to List[float] for storage of\n per-timestep float data throughout the episode.\n\n Use case 1: Model-based rollouts in multi-agent:\n A custom compute_actions() function in a policy can inspect the\n current episode state and perform a number of rollouts based on the\n policies and state of other agents in the environment.\n\n Use case 2: Returning extra rollouts data.\n The model rollouts can be returned back to the sampler by calling:\n\n >>> batch = episode.new_batch_builder()\n >>> for each transition:\n batch.add_values(...) 
# see sampler for usage\n >>> episode.extra_batches.add(batch.build_and_reset())\n \"\"\"\n\n def __init__(self, policies: Dict[PolicyID, Policy],\n policy_mapping_fn: Callable[[AgentID], PolicyID],\n batch_builder_factory: Callable[\n [], \"MultiAgentSampleBatchBuilder\"],\n extra_batch_callback: Callable[[SampleBatchType], None],\n env_id: EnvID):\n self.new_batch_builder: Callable[\n [], \"MultiAgentSampleBatchBuilder\"] = batch_builder_factory\n self.add_extra_batch: Callable[[SampleBatchType],\n None] = extra_batch_callback\n self.batch_builder: \"MultiAgentSampleBatchBuilder\" = \\\n batch_builder_factory()\n self.total_reward: float = 0.0\n self.length: int = 0\n self.episode_id: int = random.randrange(2e9)\n self.env_id = env_id\n self.agent_rewards: Dict[AgentID, float] = defaultdict(float)\n self.custom_metrics: Dict[str, float] = {}\n self.user_data: Dict[str, Any] = {}\n self.hist_data: Dict[str, List[float]] = {}\n self.media: Dict[str, Any] = {}\n self._policies: Dict[PolicyID, Policy] = policies\n self._policy_mapping_fn: Callable[[AgentID], PolicyID] = \\\n policy_mapping_fn\n self._next_agent_index: int = 0\n self._agent_to_index: Dict[AgentID, int] = {}\n self._agent_to_policy: Dict[AgentID, PolicyID] = {}\n self._agent_to_rnn_state: Dict[AgentID, List[Any]] = {}\n self._agent_to_last_obs: Dict[AgentID, EnvObsType] = {}\n self._agent_to_last_raw_obs: Dict[AgentID, EnvObsType] = {}\n self._agent_to_last_info: Dict[AgentID, EnvInfoDict] = {}\n self._agent_to_last_action: Dict[AgentID, EnvActionType] = {}\n self._agent_to_last_pi_info: Dict[AgentID, dict] = {}\n self._agent_to_prev_action: Dict[AgentID, EnvActionType] = {}\n self._agent_reward_history: Dict[AgentID, List[int]] = defaultdict(\n list)\n\n @DeveloperAPI\n def soft_reset(self) -> None:\n \"\"\"Clears rewards and metrics, but retains RNN and other state.\n\n This is used to carry state across multiple logical episodes in the\n same env (i.e., if `soft_horizon` is set).\n \"\"\"\n self.length = 0\n self.episode_id = random.randrange(2e9)\n self.total_reward = 0.0\n self.agent_rewards = defaultdict(float)\n self._agent_reward_history = defaultdict(list)\n\n @DeveloperAPI\n def policy_for(self, agent_id: AgentID = _DUMMY_AGENT_ID) -> PolicyID:\n \"\"\"Returns and stores the policy ID for the specified agent.\n\n If the agent is new, the policy mapping fn will be called to bind the\n agent to a policy for the duration of the episode.\n\n Args:\n agent_id (AgentID): The agent ID to lookup the policy ID for.\n\n Returns:\n PolicyID: The policy ID for the specified agent.\n \"\"\"\n\n if agent_id not in self._agent_to_policy:\n self._agent_to_policy[agent_id] = self._policy_mapping_fn(agent_id)\n return self._agent_to_policy[agent_id]\n\n @DeveloperAPI\n def last_observation_for(\n self, agent_id: AgentID = _DUMMY_AGENT_ID) -> EnvObsType:\n \"\"\"Returns the last observation for the specified agent.\"\"\"\n\n return self._agent_to_last_obs.get(agent_id)\n\n @DeveloperAPI\n def last_raw_obs_for(self,\n agent_id: AgentID = _DUMMY_AGENT_ID) -> EnvObsType:\n \"\"\"Returns the last un-preprocessed obs for the specified agent.\"\"\"\n\n return self._agent_to_last_raw_obs.get(agent_id)\n\n @DeveloperAPI\n def last_info_for(self,\n agent_id: AgentID = _DUMMY_AGENT_ID) -> EnvInfoDict:\n \"\"\"Returns the last info for the specified agent.\"\"\"\n\n return self._agent_to_last_info.get(agent_id)\n\n @DeveloperAPI\n def last_action_for(self,\n agent_id: AgentID = _DUMMY_AGENT_ID) -> EnvActionType:\n \"\"\"Returns the last action 
for the specified agent, or zeros.\"\"\"\n\n if agent_id in self._agent_to_last_action:\n return flatten_to_single_ndarray(\n self._agent_to_last_action[agent_id])\n else:\n policy = self._policies[self.policy_for(agent_id)]\n flat = flatten_to_single_ndarray(policy.action_space.sample())\n if hasattr(policy.action_space, \"dtype\"):\n return np.zeros_like(flat, dtype=policy.action_space.dtype)\n return np.zeros_like(flat)\n\n @DeveloperAPI\n def prev_action_for(self,\n agent_id: AgentID = _DUMMY_AGENT_ID) -> EnvActionType:\n \"\"\"Returns the previous action for the specified agent.\"\"\"\n\n if agent_id in self._agent_to_prev_action:\n return flatten_to_single_ndarray(\n self._agent_to_prev_action[agent_id])\n else:\n # We're at t=0, so return all zeros.\n return np.zeros_like(self.last_action_for(agent_id))\n\n @DeveloperAPI\n def prev_reward_for(self, agent_id: AgentID = _DUMMY_AGENT_ID) -> float:\n \"\"\"Returns the previous reward for the specified agent.\"\"\"\n\n history = self._agent_reward_history[agent_id]\n if len(history) >= 2:\n return history[-2]\n else:\n # We're at t=0, so there is no previous reward, just return zero.\n return 0.0\n\n @DeveloperAPI\n def rnn_state_for(self, agent_id: AgentID = _DUMMY_AGENT_ID) -> List[Any]:\n \"\"\"Returns the last RNN state for the specified agent.\"\"\"\n\n if agent_id not in self._agent_to_rnn_state:\n policy = self._policies[self.policy_for(agent_id)]\n self._agent_to_rnn_state[agent_id] = policy.get_initial_state()\n return self._agent_to_rnn_state[agent_id]\n\n @DeveloperAPI\n def last_pi_info_for(self, agent_id: AgentID = _DUMMY_AGENT_ID) -> dict:\n \"\"\"Returns the last info object for the specified agent.\"\"\"\n\n return self._agent_to_last_pi_info[agent_id]\n\n def _add_agent_rewards(self, reward_dict: Dict[AgentID, float]) -> None:\n for agent_id, reward in reward_dict.items():\n if reward is not None:\n self.agent_rewards[agent_id,\n self.policy_for(agent_id)] += reward\n self.total_reward += reward\n self._agent_reward_history[agent_id].append(reward)\n\n def _set_rnn_state(self, agent_id, rnn_state):\n self._agent_to_rnn_state[agent_id] = rnn_state\n\n def _set_last_observation(self, agent_id, obs):\n self._agent_to_last_obs[agent_id] = obs\n\n def _set_last_raw_obs(self, agent_id, obs):\n self._agent_to_last_raw_obs[agent_id] = obs\n\n def _set_last_info(self, agent_id, info):\n self._agent_to_last_info[agent_id] = info\n\n def _set_last_action(self, agent_id, action):\n if agent_id in self._agent_to_last_action:\n self._agent_to_prev_action[agent_id] = \\\n self._agent_to_last_action[agent_id]\n self._agent_to_last_action[agent_id] = action\n\n def _set_last_pi_info(self, agent_id, pi_info):\n self._agent_to_last_pi_info[agent_id] = pi_info\n\n def _agent_index(self, agent_id):\n if agent_id not in self._agent_to_index:\n self._agent_to_index[agent_id] = self._next_agent_index\n self._next_agent_index += 1\n return self._agent_to_index[agent_id]\n" ]
[ [ "numpy.square", "numpy.exp", "numpy.mean", "numpy.sqrt" ], [ "numpy.squeeze", "numpy.sign", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "numpy.array" ], [ "numpy.__version__.split" ], [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LucaMalavolta/q2
[ "d4cd62c3ea898c99334ea84e2b41ec75db9558f7" ]
[ "config.py" ]
[ "import os\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\npath = os.path.dirname(os.path.realpath(__file__))\r\npath = os.path.join(path, 'Data')\r\n\r\nCOLORTEFF_PATH = os.path.join(path, 'ColorTeff')\r\nMODATM_PATH = os.path.join(path, 'ModelAtmospheres')\r\nISOCHRONES_PATH = os.path.join(path, 'Isochrones')\r\nOTHER_PATH = os.path.join(path, 'Other')\r\n\r\nplt.rc(\"font\", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \\\r\n size=14)\r\nplt.rc(\"axes\", labelsize=15, titlesize=12)\r\nplt.rc(\"xtick\", top=True, direction='in', labelsize=14)\r\nplt.rc(\"xtick.major\", size=8, width=1)\r\nplt.rc(\"ytick\", right=True, direction='in', labelsize=14)\r\nplt.rc(\"ytick.major\", size=8, width=1)\r\nplt.rc(\"lines\", markersize=10, markeredgewidth=2)\r\nplt.rc(\"lines\", linewidth=3)\r\n\r\ndef moog_is_available():\r\n \"\"\"You should be able to run MOOGSILENT from the command line in order\r\n to use the MOOG features included in q2. This function checks if\r\n MOOG is available on your system. If False, you wont be able to\r\n connect q2 to MOOG and many things will fail.\r\n \"\"\"\r\n if os.system('which MOOGSILENT >/dev/null'):\r\n logger.warning(\"MOOGSILENT is not available\")\r\n return False\r\n else:\r\n logger.info(\"MOOGSILENT is available\")\r\n return True\r\n\r\ndef data_are_available():\r\n \"\"\"q2 needs data files with model atmosphere and isochrone grids.\r\n These files can be downloaded from:\r\n http://www.astrochasqui.com/projects/astro/share/q2Data.tar.gz\r\n They need to be extracted inside the q2 directory.\r\n 'tar xvfz q2Data.tar.gz' will create the Data folder.\r\n \"\"\"\r\n if os.path.exists(path):\r\n logger.info(\"Data folder exists\")\r\n return True\r\n else:\r\n logger.warning(\"Data folder does not exist. See the 'Data' section \"\\\r\n \"at https://github.com/astroChasqui/q2\")\r\n return False\r\n" ]
[ [ "matplotlib.pyplot.rc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SeHwanJoo/mmdetection_body
[ "1e1cadc6df91926fc99c4afbae383df0ea9cfed3" ]
[ "mmdet/models/seg_heads/panoptic_fpn_head.py" ]
[ "import torch\nimport torch.nn as nn\nfrom mmcv.runner import ModuleList\n\nfrom ..builder import HEADS\nfrom ..utils import ConvUpsample\nfrom .base_semantic_head import BaseSemanticHead\n\n\[email protected]_module()\nclass PanopticFPNHead(BaseSemanticHead):\n \"\"\"PanopticFPNHead used in Panoptic FPN.\n\n Arg:\n num_classes (int): Number of classes, including all stuff\n classes and one thing class.\n in_channels (int): Number of channels in the input feature\n map.\n inner_channels (int): Number of channels in inner features.\n start_level (int): The start level of the input features\n used in PanopticFPN.\n end_level (int): The end level of the used features, the\n `end_level`-th layer will not be used.\n fg_range (tuple): Range of the foreground classes.\n bg_range (tuple): Range of the background classes.\n conv_cfg (dict): Dictionary to construct and config\n conv layer. Default: None.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n Use ``GN`` by default.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n loss_seg (dict): the loss of the semantic head.\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels=256,\n inner_channels=128,\n start_level=0,\n end_level=4,\n fg_range=(1, 80),\n bg_range=(81, 133),\n conv_cfg=None,\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n init_cfg=None,\n loss_seg=dict(\n type='CrossEntropyLoss', ignore_index=-1,\n loss_weight=1.0)):\n super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)\n self.fg_range = fg_range\n self.bg_range = bg_range\n self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1\n self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1\n # Used feature layers are [start_level, end_level)\n self.start_level = start_level\n self.end_level = end_level\n self.num_stages = end_level - start_level\n self.inner_channels = inner_channels\n\n self.conv_upsample_layers = ModuleList()\n for i in range(start_level, end_level):\n self.conv_upsample_layers.append(\n ConvUpsample(\n in_channels,\n inner_channels,\n num_layers=i if i > 0 else 1,\n num_upsample=i if i > 0 else 0,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n ))\n self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)\n\n def _set_things_to_void(self, gt_semantic_seg):\n \"\"\"Merge thing classes to one class.\"\"\"\n gt_semantic_seg = gt_semantic_seg.int()\n fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (\n gt_semantic_seg <= self.fg_range[1])\n bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (\n gt_semantic_seg <= self.bg_range[1])\n\n new_gt_seg = fg_mask.int() * (self.bg_nums + 1)\n new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,\n new_gt_seg)\n return new_gt_seg\n\n def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):\n \"\"\"The loss of PanopticFPN head.\n\n Things classes will be merged to one class in PanopticFPN.\n \"\"\"\n gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)\n return super().loss(seg_preds, gt_semantic_seg, label_bias)\n\n def init_weights(self):\n super().init_weights()\n nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)\n self.conv_logits.bias.data.zero_()\n\n def forward(self, x):\n # the number of subnets must be not more than\n # the length of features.\n assert self.num_stages <= len(x)\n\n feats = []\n for i, layer in enumerate(self.conv_upsample_layers):\n f = layer(x[self.start_level + i])\n feats.append(f)\n\n feats = torch.sum(torch.stack(feats, dim=0), dim=0)\n seg_preds = self.conv_logits(feats)\n out = 
dict(seg_preds=seg_preds, feats=feats)\n return out\n" ]
[ [ "torch.stack", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndresGarciaEscalante/bstld
[ "cc37fb3388b7731be9e76fd1c4e2be13b6716afe" ]
[ "tf_object_detection/to_tfrecords.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nCreates full-image tfrecords to use the Bosch Small Traffic Lights Dataset\nwith the Tensorflow Object Detection API.\n\nThe training set is split into training and validation. Tfrecords are created\nfor a training, validation, and test set. Labels are grouped by their respective\ncolors to simplify training and because the test-set does not contain any arrows.\n\nDepending on the training method, you may want to look into creating random crops\nfrom the images which can increase training performance due to translated inputs.\nThe tfrecords come without any image augmentation.\n\nThe created tfrecords will be about 18GB.\n\nUsage:\n In the folder with the extracted traffic lights dataset, run\n python /path/to/this/file/to_tfrecords.py\n and it will create the tfrecords there.\n\nThe path of the annotation files, tfrecords, and dataset folder can be specified.\nNote that this is a tutorial file. There are only few checks and no logging.\n\"\"\"\n\nimport argparse\nfrom collections import OrderedDict, defaultdict\nimport hashlib\nimport os\nfrom random import shuffle\n\nimport cv2\nimport tensorflow as tf\nimport tqdm\n\n# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md\nfrom object_detection.utils import dataset_util\n\nimport sys\n \n# getting the name of the directory\n# where the this file is present.\ncurrent = os.path.dirname(os.path.realpath(__file__))\n \n# Getting the parent directory name\n# where the current directory is present.\nparent = os.path.dirname(current)\n \n# adding the parent directory to \n# the sys.path.\nsys.path.append(parent)\n\nfrom read_label_file import get_all_labels\nfrom tf_object_detection import constants\n\n\ndef label_id(label_string):\n \"\"\" For detections without classification \"\"\"\n # For object proposals only, you could return 1\n return constants.TF_ID_MAP[constants.SIMPLIFIED_CLASSES[label_string]]\n\n\ndef modified_label_string(label_string):\n \"\"\" To simplify the problem, training classes are grouped by color \"\"\"\n return constants.SIMPLIFIED_CLASSES[label_string].encode('utf8')\n\n\ndef list_of_dicts_to_dict_of_lists(list_of_dicts):\n \"\"\" [{'a': 0, 'b':3}, {'a': 3, 'b':5}] --> {'a': [0, 3], 'b': [3, 5]}\"\"\"\n assert isinstance(list_of_dicts, list)\n dict_lists = defaultdict(list)\n for some_dict in list_of_dicts:\n for key, value in some_dict.items():\n dict_lists[key].append(value)\n return dict_lists\n\n\ndef clip(some_value):\n \"\"\" Clip values outside [0, 1]. float -> float \"\"\"\n # Just in case some very eager annotators detected lights outside the image. 
It happens\n return max(0, min(some_value, 1))\n\n\ndef create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''):\n \"\"\" Creates a tfrecord dataset specific to tensorflow/models/research/objection_detection\n params:\n labels: list of annotations as defined in annotation yamls\n tfrecords_path: output path to create tfrecords\n dataset_folder: path to bstld folder, must include rgb directory\n \"\"\"\n\n #shuffle(labels)\n writer = tf.io.TFRecordWriter(tfrecords_path)\n for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)):\n image_path = os.path.join(dataset_folder, label['path'])\n image = cv2.imread(image_path)\n if image is None:\n print('Did you extract the training, validation, and additional images?')\n raise IOError('Missing: {}'.format(image_path))\n height, width, _ = image.shape\n\n boxes = list_of_dicts_to_dict_of_lists(label['boxes'])\n classes = boxes['label']\n xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min']))\n ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min']))\n xmax = list(map(lambda x: clip(x / float(width)), boxes['x_max']))\n ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max']))\n\n assert len(xmin) == len(xmax) == len(ymin)\n assert len(ymax) == len(classes) == len(label['boxes'])\n\n if not classes:\n continue # We don't need empty images, there are enough negatives\n\n _, image = cv2.imencode('.png', image) # Assuming that works\n image = image.tostring()\n sha256 = hashlib.sha256(image).hexdigest()\n image_format = 'png'\n complete_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),\n 'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),\n 'image/format': dataset_util.bytes_feature(image_format.encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(\n list(map(modified_label_string, classes))),\n 'image/object/class/label': dataset_util.int64_list_feature(\n list(map(label_id, classes))),\n }))\n writer.write(complete_example.SerializeToString())\n\n writer.close()\n\n\ndef split_train_labels(train_labels):\n # one entry for each image in a folder/video to check their sizes later\n train_videos = [os.path.split(os.path.split(train_label['path'])[0])[1]\n for train_label in train_labels]\n # NOTE Because set order is not guaranteed (and we want to support different Python versions)\n video_dict = OrderedDict().fromkeys(train_videos)\n video_lengths = [train_videos.count(video) for video in video_dict.keys()]\n # The first three videos are used for the validation set.\n # Note that this may not be a completely clean validation set as the sequences\n # were captured independently but may be on the same day and are taken within\n # the same general area. This split is for object detection demonstation\n # purposes only. 
For clean dataset separation, the sequences would need to be\n # recorded on separate days and preferably in different areas.\n #\n # validation samples: 933, training samples: 4160 (+215 additional)\n num_valid_samples = sum(video_lengths[:3])\n return train_labels[num_valid_samples:], train_labels[:num_valid_samples]\n\n\ndef create_datasets(config):\n \"\"\" Splits labels and creates datasets \"\"\"\n train_labels = get_all_labels(config['train_yaml'])\n test_labels = get_all_labels(config['test_yaml'])\n\n if config['additional_yaml']:\n additional_labels = get_all_labels(config['additional_yaml'])\n\n # Split training labels into training and validation for \"more correct\" validation\n train_labels, valid_labels = split_train_labels(train_labels)\n train_labels.extend(additional_labels) # add unappealing images to training set\n\n if not os.path.isdir(config['dataset_folder']) or\\\n not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')):\n print('Dataset_folder needs to contain extracted dataset, including the rgb folder')\n print('{} does not fulfill those requirements'.format(config['dataset_folder']))\n\n create_object_detection_tfrecords(\n train_labels, config['train_tfrecord'], config['dataset_folder'], 'train')\n create_object_detection_tfrecords(\n valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid')\n create_object_detection_tfrecords(\n test_labels, config['test_tfrecord'], config['dataset_folder'], 'test')\n\n print('Done creating tfrecords')\n\n\ndef parse_args():\n \"\"\" Command line args to tfrecords creation config \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--train_yaml', default='train.yaml',\n help='Path to train.yaml')\n parser.add_argument('--test_yaml', default='test.yaml',\n help='Path to test.yaml')\n parser.add_argument('--additional_yaml', default='additional_train.yaml',\n help='Path to train_additional.yaml')\n parser.add_argument('--dataset_folder', default='.',\n help='Path to dataset folder')\n parser.add_argument('--train_tfrecord', default='train.tfrecords',\n help='Path to train.tfrecord')\n parser.add_argument('--valid_tfrecord', default='valid.tfrecords',\n help='Path to valid.tfrecord')\n parser.add_argument('--test_tfrecord', default='test.tfrecords',\n help='Path to test.tfrecord')\n args = vars(parser.parse_args())\n return args\n\n\nif __name__ == '__main__':\n config = parse_args()\n create_datasets(config)\n" ]
[ [ "tensorflow.io.TFRecordWriter", "tensorflow.train.BytesList" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soyoung9306/-3-keras
[ "4090fcc86072cda816d1d6056b5113ace49534ae", "e65f40171aadef3fe0b59c649b55b3f0bd09ca41" ]
[ "ex9_1_applications_agumentation.py", "old/gan_cnn_mnist_org.py" ]
[ "\"\"\"\nCH 9.1 Applications/Image Augmentation\n\"\"\"\nfrom sklearn import model_selection\nfrom keras import datasets\nimport keras\nassert keras.backend.image_data_format() == 'channels_last'\n\nfrom keraspp import aigen\n\n\nclass Machine(aigen.Machine_Generator):\n def __init__(self):\n (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()\n _, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02)\n X = X.astype(float)\n\n gen_param_dict = {'rotation_range': 10}\n\n super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict)\n\n\ndef main():\n m = Machine()\n m.run()\n\n\nif __name__ == '__main__':\n main()", "################################\n# 공통 패키지 불러오기\n################################\nfrom keras.datasets import mnist\nimport numpy as np\nfrom PIL import Image\nimport math\nimport os\n\nimport keras.backend as K\n\nK.set_image_data_format('channels_first')\nprint(K.image_data_format)\n\n################################\n# GAN 모델링\n################################\nfrom keras import models, layers, optimizers\n\n\nclass GAN(models.Sequential):\n def __init__(self, input_dim=64):\n \"\"\"\n self, self.generator, self.discriminator are all models\n \"\"\"\n super().__init__()\n self.input_dim = input_dim\n\n self.generator = self.GENERATOR()\n self.discriminator = self.DISCRIMINATOR()\n self.add(self.generator)\n self.discriminator.trainable = False\n self.add(self.discriminator)\n\n self.compile_all()\n\n def compile_all(self):\n # Compiling stage\n d_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)\n g_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)\n self.generator.compile(loss='binary_crossentropy', optimizer=\"SGD\")\n self.compile(loss='binary_crossentropy', optimizer=g_optim)\n self.discriminator.trainable = True\n self.discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)\n\n def GENERATOR(self):\n input_dim = self.input_dim\n\n model = models.Sequential()\n model.add(layers.Dense(1024, activation='tanh', input_dim=input_dim))\n model.add(layers.Dense(128 * 7 * 7, activation='tanh'))\n model.add(layers.BatchNormalization())\n model.add(layers.Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))\n model.add(layers.UpSampling2D(size=(2, 2)))\n model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh'))\n model.add(layers.UpSampling2D(size=(2, 2)))\n model.add(layers.Conv2D(1, (5, 5), padding='same', activation='tanh'))\n return model\n\n def DISCRIMINATOR(self):\n model = models.Sequential()\n model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh',\n input_shape=(1, 28, 28)))\n model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(layers.Conv2D(128, (5, 5), activation='tanh'))\n model.add(layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(1024, activation='tanh'))\n model.add(layers.Dense(1, activation='sigmoid'))\n return model\n\n def get_z(self, ln):\n input_dim = self.input_dim\n return np.random.uniform(-1, 1, (ln, input_dim))\n\n def train_both(self, x):\n ln = x.shape[0]\n # First trial for training discriminator\n z = self.get_z(ln)\n w = self.generator.predict(z, verbose=0)\n xw = np.concatenate((x, w))\n y2 = [1] * ln + [0] * ln\n d_loss = self.discriminator.train_on_batch(xw, y2)\n\n # Second trial for training generator\n z = self.get_z(ln)\n self.discriminator.trainable = False\n g_loss = self.train_on_batch(z, [1] * ln)\n self.discriminator.trainable = True\n\n return d_loss, 
g_loss\n\n\n################################\n# GAN 학습하기\n################################\ndef combine_images(generated_images):\n num = generated_images.shape[0]\n width = int(math.sqrt(num))\n height = int(math.ceil(float(num) / width))\n shape = generated_images.shape[2:]\n image = np.zeros((height * shape[0], width * shape[1]),\n dtype=generated_images.dtype)\n for index, img in enumerate(generated_images):\n i = int(index / width)\n j = index % width\n image[i * shape[0]:(i + 1) * shape[0],\n j * shape[1]:(j + 1) * shape[1]] = img[0, :, :]\n return image\n\n\ndef get_x(X_train, index, BATCH_SIZE):\n return X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]\n\n\ndef save_images(generated_images, output_fold, epoch, index):\n image = combine_images(generated_images)\n image = image * 127.5 + 127.5\n Image.fromarray(image.astype(np.uint8)).save(\n output_fold + '/' +\n str(epoch) + \"_\" + str(index) + \".png\")\n\n\ndef load_data():\n (X_train, y_train), (_, _) = mnist.load_data()\n\n return X_train[:10]\n\ndef train(args):\n BATCH_SIZE = args.batch_size\n epochs = args.epochs\n output_fold = args.output_fold\n input_dim = args.input_dim\n\n os.makedirs(output_fold, exist_ok=True)\n print('Output_fold is', output_fold)\n\n X_train = load_data()\n\n X_train = (X_train.astype(np.float32) - 127.5) / 127.5\n X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])\n\n gan = GAN(input_dim)\n\n d_loss_ll = []\n g_loss_ll = []\n for epoch in range(epochs):\n print(\"Epoch is\", epoch)\n print(\"Number of batches\", int(X_train.shape[0] / BATCH_SIZE))\n\n d_loss_l = []\n g_loss_l = []\n for index in range(int(X_train.shape[0] / BATCH_SIZE)):\n x = get_x(X_train, index, BATCH_SIZE)\n\n d_loss, g_loss = gan.train_both(x)\n\n d_loss_l.append(d_loss)\n g_loss_l.append(g_loss)\n\n if epoch % 10 == 0 or epoch == epochs - 1:\n z = gan.get_z(x.shape[0])\n w = gan.generator.predict(z, verbose=0)\n save_images(w, output_fold, epoch, 0)\n\n d_loss_ll.append(d_loss_l)\n g_loss_ll.append(g_loss_l)\n\n gan.generator.save_weights(output_fold + '/' + 'generator', True)\n gan.discriminator.save_weights(output_fold + '/' + 'discriminator', True)\n\n np.savetxt(output_fold + '/' + 'd_loss', d_loss_ll)\n np.savetxt(output_fold + '/' + 'g_loss', g_loss_ll)\n\n\n################################\n# GAN 예제 실행하기\n################################\ndef main():\n class ARGS:\n pass\n\n args = ARGS()\n args.batch_size = 2\n args.epochs = 10\n args.output_fold = 'GAN_OUT'\n args.input_dim = 10\n\n train(args)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "sklearn.model_selection.train_test_split" ], [ "numpy.savetxt", "numpy.random.uniform", "numpy.zeros", "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangheng95/PyABSA
[ "f5b46047a58fa8054a0469486be3f1cada933814" ]
[ "pyabsa/core/tc/prediction/text_classifier.py" ]
[ "# -*- coding: utf-8 -*-\n# file: text_classifier.py\n# author: yangheng <[email protected]>\n# Copyright (C) 2020. All Rights Reserved.\nimport json\nimport os\nimport pickle\nimport random\n\nimport numpy\nimport torch\nfrom findfile import find_file\nfrom termcolor import colored\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoTokenizer, AutoModel\n\nfrom pyabsa.functional.dataset import detect_infer_dataset\n\nfrom ..models import GloVeClassificationModelList, BERTClassificationModelList\nfrom ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset\nfrom ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset\n\nfrom ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer\n\nfrom pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError\n\n\nclass TextClassifier:\n def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):\n '''\n from_train_model: load inferring_tutorials model from trained model\n '''\n\n self.initializers = {\n 'xavier_uniform_': torch.nn.init.xavier_uniform_,\n 'xavier_normal_': torch.nn.init.xavier_normal,\n 'orthogonal_': torch.nn.init.orthogonal_\n }\n # load from a training\n if not isinstance(model_arg, str):\n print('Load text classifier from training')\n self.model = model_arg[0]\n self.opt = model_arg[1]\n self.tokenizer = model_arg[2]\n else:\n try:\n if 'fine-tuned' in model_arg:\n raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')\n print('Load text classifier from', model_arg)\n state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])\n model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])\n tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])\n config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])\n\n print('config: {}'.format(config_path))\n print('state_dict: {}'.format(state_dict_path))\n print('model: {}'.format(model_path))\n print('tokenizer: {}'.format(tokenizer_path))\n\n self.opt = pickle.load(open(config_path, mode='rb'))\n\n if state_dict_path or model_path:\n if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):\n if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:\n if 'pretrained_bert_name' in self.opt.args:\n self.opt.pretrained_bert = self.opt.pretrained_bert_name\n if state_dict_path:\n try:\n self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)\n self.model = self.opt.model(self.bert, self.opt)\n except ValueError:\n raise TransformerConnectionError()\n elif model_path:\n if model_path:\n self.model = torch.load(model_path, map_location='cpu')\n if tokenizer_path:\n self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))\n else:\n raise ValueError('No .tokenizer found!')\n else:\n self.tokenizer = build_tokenizer(\n dataset_list=self.opt.dataset_file,\n max_seq_len=self.opt.max_seq_len,\n dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),\n opt=self.opt\n )\n if model_path:\n self.model = torch.load(model_path, map_location='cpu')\n else:\n self.embedding_matrix = build_embedding_matrix(\n word2idx=self.tokenizer.word2idx,\n embed_dim=self.opt.embed_dim,\n dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),\n opt=self.opt\n )\n self.model = 
self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)\n self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))\n\n print('Config used in Training:')\n print_args(self.opt, mode=1)\n\n except Exception as e:\n raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))\n\n if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \\\n and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):\n raise KeyError('The checkpoint you are loading is not from classifier model.')\n\n if hasattr(BERTClassificationModelList, self.opt.model.__name__):\n self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)\n\n elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):\n self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)\n\n self.opt.inputs_cols = self.model.inputs\n\n self.infer_dataloader = None\n self.opt.eval_batch_size = eval_batch_size\n\n if self.opt.seed is not None:\n random.seed(self.opt.seed)\n numpy.random.seed(self.opt.seed)\n torch.manual_seed(self.opt.seed)\n torch.cuda.manual_seed(self.opt.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n self.opt.initializer = self.opt.initializer\n\n self.label_map = None\n self.set_label_map(label_map)\n\n def set_label_map(self, label_map):\n if label_map:\n print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))\n label_map[LABEL_PADDING] = ''\n self.label_map = label_map\n\n def to(self, device=None):\n self.opt.device = device\n self.model.to(device)\n\n def cpu(self):\n self.opt.device = 'cpu'\n self.model.to('cpu')\n\n def cuda(self, device='cuda:0'):\n self.opt.device = device\n self.model.to(device)\n\n def _log_write_args(self):\n n_trainable_params, n_nontrainable_params = 0, 0\n for p in self.model.parameters():\n n_params = torch.prod(torch.tensor(p.shape))\n if p.requires_grad:\n n_trainable_params += n_params\n else:\n n_nontrainable_params += n_params\n print(\n 'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))\n for arg in vars(self.opt):\n if getattr(self.opt, arg) is not None:\n print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))\n\n def batch_infer(self,\n target_file=None,\n print_result=True,\n save_result=False,\n clear_input_samples=True,\n ignore_error=True):\n\n if clear_input_samples:\n self.clear_input_samples()\n\n save_path = os.path.join(os.getcwd(), 'text_classification.result.json')\n\n target_file = detect_infer_dataset(target_file, task='text_classification')\n if not target_file:\n raise FileNotFoundError('Can not find inference datasets!')\n\n self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)\n return self._infer(save_path=save_path if save_result else None, print_result=print_result)\n\n def infer(self, text: str = None,\n print_result=True,\n clear_input_samples=True):\n\n if clear_input_samples:\n self.clear_input_samples()\n if text:\n self.dataset.prepare_infer_sample(text)\n else:\n raise RuntimeError('Please specify your datasets path!')\n self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)\n return self._infer(print_result=print_result)\n\n def merge_results(self, results):\n \"\"\" 
merge APC results have the same input text\n \"\"\"\n final_res = []\n for result in results:\n\n if final_res and \"\".join(final_res[-1]['text'].split()) == \"\".join(result['text'].split()):\n final_res[-1]['label'].append(result['label'])\n final_res[-1]['ref_label'].append(result['ref_label'])\n final_res[-1]['ref_check'].append(result['ref_check'])\n else:\n final_res.append(\n {\n 'text': result['text'].replace(' ', ' '),\n 'label': [result['label']],\n 'ref_label': [result['ref_label']],\n 'ref_check': [result['ref_check']]\n }\n )\n\n return final_res\n\n def _infer(self, save_path=None, print_result=True):\n\n _params = filter(lambda p: p.requires_grad, self.model.parameters())\n\n correct = {True: 'Correct', False: 'Wrong'}\n results = []\n\n with torch.no_grad():\n self.model.eval()\n n_correct = 0\n n_labeled = 0\n n_total = 0\n for _, sample in enumerate(self.infer_dataloader):\n inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']\n self.model.eval()\n outputs = self.model(inputs)\n sen_logits = outputs\n t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()\n for i, i_probs in enumerate(t_probs):\n if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):\n sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]\n if sample['label'] != -999:\n real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')\n else:\n real_sent = 'N.A.'\n if real_sent != -999 and real_sent != '-999':\n n_labeled += 1\n if sent == real_sent:\n n_correct += 1\n else: # for the former versions until 1.2.0\n sent = int(i_probs.argmax(axis=-1))\n real_sent = int(sample['label'][i])\n\n text_raw = sample['text_raw'][i]\n\n results.append({\n 'text': text_raw,\n 'label': sent,\n 'ref_label': real_sent,\n 'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',\n })\n n_total += 1\n if len(self.infer_dataloader) > 1:\n print('Total samples:{}'.format(n_total))\n print('Labeled samples:{}'.format(n_labeled))\n print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))\n\n try:\n if print_result:\n for result in results:\n text_printing = result['text']\n\n if result['ref_label'] != -999:\n if result['label'] == result['ref_label']:\n text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')\n else:\n text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')\n else:\n text_info = ' -> {}'.format(result['label'])\n\n text_printing += text_info\n print(text_printing)\n if save_path:\n fout = open(save_path, 'w', encoding='utf8')\n json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)\n # fout.write('Total samples:{}\\n'.format(n_total))\n # fout.write('Labeled samples:{}\\n'.format(n_labeled))\n # fout.write('Prediction Accuracy:{}%\\n'.format(100 * n_correct / n_labeled)) if n_labeled else 'N.A.'\n print('inference result saved in: {}'.format(save_path))\n except Exception as e:\n print('Can not save result: {}, Exception: {}'.format(text_raw, e))\n return results\n\n def clear_input_samples(self):\n self.dataset.all_data = []\n" ]
[ [ "torch.softmax", "torch.cuda.manual_seed", "numpy.random.seed", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.tensor", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fqnchina/NeuralRouting
[ "333dc95cb2d9a779de88e2349883a0002111d1b3" ]
[ "dataset_loader.py" ]
[ "import numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom torchvision import transforms as tfs\r\nfrom PIL import Image\r\nimport os, cv2, copy, time\r\nfrom config import *\r\n\r\n\r\n# args.\r\nimage_height, image_width = opt.image_height, opt.image_width\r\nintrinsics = opt.intrinsics\r\nclose_radius, far_radiuses = 0, opt.far_radiuses\r\nn_neighb_pts = opt.n_neighb_pts\r\n\r\n\r\ndef isSon(son, fa):\r\n for i in range(len(fa)):\r\n if son[i] != fa[i]:\r\n return False\r\n return True\r\n\r\n\r\n# todo: to be migrated...\r\ndef depth2local(depth): # depth: float32, meter.\r\n cx, cy, fx, fy = intrinsics[0, 2], intrinsics[1, 2], intrinsics[0, 0], intrinsics[1, 1]\r\n u_base = np.tile(np.arange(image_width), (image_height, 1))\r\n v_base = np.tile(np.arange(image_height)[:, np.newaxis], (1, image_width))\r\n X = (u_base - cx) * depth / fx\r\n Y = (v_base - cy) * depth / fy\r\n coord_local = np.stack((X, Y, depth), axis=2)\r\n return coord_local\r\ndef partial_pts(pts_all_in, p, r_min, r_max): # pts_all_in.shape (#points, #channel)\r\n pts_all = copy.deepcopy(pts_all_in)\r\n p_mat = p[np.newaxis, 0:3].repeat(pts_all.shape[0], axis=0)\r\n norms = np.linalg.norm((p_mat - pts_all[:, 0:3]), axis=1)\r\n return pts_all[np.logical_and(norms >= r_min, norms <= r_max)]\r\ndef sample_pts(pts_in, num): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :]\r\ndef sample_pts_rc(pts_in, rcs_in, num): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n rcs = copy.deepcopy(rcs_in)\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :], rcs_in[rand_ids[0:num], :]\r\ndef sample_pts9d_r3d(pts_in, num, radius): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n thresh = 500\r\n # remove background by 3d radius\r\n xyz = pts[:, 0:3]\r\n pts = pts[np.linalg.norm(xyz, axis=1) <= radius]\r\n # print('pt num after r3d {}'.format(pts.shape[0]))\r\n if pts.shape[0] < thresh: # avoid infinite loop.\r\n return None\r\n while pts.shape[0] < num:\r\n pts = np.concatenate((pts, pts), axis=0)\r\n rand_ids = np.arange(pts.shape[0])\r\n np.random.shuffle(rand_ids)\r\n return pts[rand_ids[0:num], :]\r\ndef shift_pts(pts_in, cen): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, 0:3] = pts[:, 0:3] - cen_mat\r\n return pts\r\ndef shift_pts6d(pts_in, cen): # pts_in.shape (#points, #channel)\r\n pts = copy.deepcopy(pts_in)\r\n cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, :] = pts[:, :] - cen_mat\r\n return pts\r\ndef shift_pts9d(pts_in, cen): # pts_in.shape (#points, #channel)\r\n cpt = copy.deepcopy(cen)\r\n cpt[3:6] = np.zeros(3) # remove shift of normal\r\n pts = copy.deepcopy(pts_in)\r\n cpt_mat = cpt[np.newaxis, :].repeat(pts.shape[0], axis=0)\r\n pts[:, :] = pts[:, :] - cpt_mat\r\n return pts\r\n\r\n\r\ndef make_ppf(pts9d, cen9d): # (N,9), (9,)\r\n # prepare\r\n n_pts = pts9d.shape[0]\r\n d = pts9d[:, 0:3]\r\n n2 = pts9d[:, 3:6]\r\n n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)\r\n # ppf\r\n dim1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1)\r\n d = d / (dim1.reshape(n_pts, 1))\r\n dim2 = np.sum(n1 * d, 
axis=1).reshape(n_pts, 1)\r\n dim3 = np.sum(n2 * d, axis=1).reshape(n_pts, 1)\r\n dim4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1)\r\n ppf = np.concatenate((dim1, dim2, dim3, dim4), axis=1)\r\n ppf7d = np.concatenate((ppf, pts9d[:, 6:9]), axis=1)\r\n return ppf7d\r\n\r\ndef compute_points_normal(pts):\r\n raw_shape = pts.shape\r\n normal = np.zeros((raw_shape)) # (r,c,3)\r\n t0 = time.time()\r\n for r in range(2, raw_shape[0] - 2):\r\n for c in range(2, raw_shape[1] - 2):\r\n pts_local = pts[r - 2:r + 3, c - 2:c + 3, :] # (5,5,3)\r\n pts_local = pts_local.reshape(-1, 3) # (N,3)\r\n pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1] # remove outliers.\r\n if pts_local.shape[0] < 4:\r\n continue\r\n pts_local = pts_local - np.mean(pts_local, axis=0)\r\n C = pts_local.T @ pts_local / pts_local.shape[0]\r\n e, v = np.linalg.eig(C)\r\n d = v[:, np.where(e == np.min(e))[0][0]]\r\n n = d / np.linalg.norm(d)\r\n if np.dot(n, np.array([0, 0, 1])) > 0:\r\n n = -n\r\n normal[r, c, :] = n\r\n t1 = time.time()\r\n print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0))\r\n return normal\r\n\r\n\r\n# for depth adaptive 2d\r\ndef partial_pts_2d(pts_rc, cen_rc, list_drdc):\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n result = pts_rc[targ_r, targ_c]\r\n return copy.deepcopy(result)\r\n\r\n\r\n# for depth adaptive 2d\r\ndef partial_pts_2d_rc(pts_rc, cen_rc, list_drdc):\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n result = pts_rc[targ_r, targ_c]\r\n return copy.deepcopy(result), copy.deepcopy(\r\n np.concatenate((targ_r.reshape(targ_r.shape[0], 1), targ_c.reshape(targ_c.shape[0], 1)), axis=1))\r\n\r\n\r\n# for depth adaptive 2d with dynamics label\r\ndef partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask): # mask: 0 for static pixel, 255 for dynamic pixel.\r\n result = None\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n mat_drdc = (np.array(list_drdc) / 4).astype(int)\r\n mat_cen_rc = np.array(cen_rc)\r\n mat_targ_rc = cen_rc + mat_drdc\r\n mat_targ_rc[mat_targ_rc < 0] = 0\r\n targ_r = mat_targ_rc[:, 0]\r\n targ_r[targ_r > r_max] = r_max\r\n targ_c = mat_targ_rc[:, 1]\r\n targ_c[targ_c > c_max] = c_max\r\n m1 = np.zeros((mask.shape[0], mask.shape[1]))\r\n m1[mask == 0] = 1\r\n m2 = np.zeros((mask.shape[0], mask.shape[1]))\r\n m2[targ_r, targ_c] = 1\r\n m3 = np.logical_and(m1, m2)\r\n result = pts_rc[m3]\r\n return copy.deepcopy(result)\r\n\r\n\r\nclass LevelDataset_PPF(Dataset):\r\n\r\n def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None,\r\n enable_color_aug=True, specified_node=None):\r\n super().__init__()\r\n self.data_dir, self.the_list = data_dir, the_list\r\n self.n_pts_per_frame = n_pts_per_frame\r\n self.neighbor_da2d = neighbor_da2d # (n_pts, dim_pt).\r\n self.far_radius = far_radius # scalar.\r\n 
self.enable_color_aug = enable_color_aug\r\n self.specified_node = specified_node\r\n\r\n def __len__(self):\r\n return len(self.the_list)\r\n\r\n def __getitem__(self, idx):\r\n fid, rc_route = self.the_list[idx]\r\n # load \r\n depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0\r\n color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]\r\n # color jitter\r\n if self.enable_color_aug:\r\n img = Image.fromarray(color)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(brightness=1.)(img)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(contrast=1.)(img)\r\n if np.random.rand() < 0.5:\r\n img = tfs.ColorJitter(saturation=1.)(img)\r\n color = np.array(img)\r\n if np.max(color) > 1:\r\n color = color / 255. - 0.5\r\n local = depth2local(depth)\r\n r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))\r\n depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]\r\n # normal by 3d neighbor plane fitting.\r\n normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)\r\n if os.path.exists(normal_path):\r\n # print('fid {}'.format(fid)) # to debug rio10 scene09 10\r\n # normal = np.load(normal_path)\r\n if os.path.getsize(normal_path) > 1:\r\n normal = np.load(normal_path, encoding='bytes', allow_pickle=True)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)\r\n # build a patch\r\n rand_ids = np.arange(len(rc_route))\r\n np.random.shuffle(rand_ids)\r\n selected_ids = rand_ids[0:self.n_pts_per_frame * 2] # more candidates\r\n pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))\r\n nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts))\r\n route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary)\r\n rc_list = []\r\n # da2d+3d neighbor\r\n if not self.neighbor_da2d is None:\r\n sid = 0\r\n for tmp_idx in range(len(selected_ids)):\r\n r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1]\r\n if np.isnan(lclnmlclr[r, c, 3]):\r\n continue\r\n if self.specified_node:\r\n if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node):\r\n continue\r\n route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2])\r\n rc_list.append([r, c])\r\n pt_in[sid] = torch.Tensor(\r\n np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))\r\n da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int)\r\n # ppf\r\n pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),\r\n lclnmlclr[r, c, :])\r\n cen9d = copy.deepcopy(lclnmlclr[r, c, :])\r\n cen9d[0:3] = np.zeros(3)\r\n ppf7d = make_ppf(pts9d, cen9d) # (N,9), (9,)\r\n ppf7d[np.isnan(ppf7d)] = 0.0\r\n nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0)\r\n # remove background by 3d radius\r\n xyz = pts9d[:, 0:3]\r\n ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius\r\n nb_in[sid, :, ids_out_of_bound] = 0.\r\n # count\r\n sid += 1\r\n if sid >= self.n_pts_per_frame:\r\n break\r\n pt_in = pt_in[:sid]\r\n nb_in = nb_in[:sid]\r\n route_labs = route_labs[:sid]\r\n return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list))\r\n\r\n\r\nclass 
TestDataset_PPF(Dataset):\r\n\r\n def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None):\r\n super().__init__()\r\n self.data_dir, self.the_list = data_dir, the_list\r\n self.n_pts_per_frame = n_pts_per_frame\r\n self.neighbor_da2d = neighbor_da2d # list of (n_pts, dim_pt)\r\n\r\n def __len__(self):\r\n return len(self.the_list)\r\n\r\n def __getitem__(self, idx):\r\n fid = self.the_list[idx]\r\n # load \r\n depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0\r\n color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]\r\n if np.max(color) > 1:\r\n color = color / 255. - 0.5\r\n local = depth2local(depth)\r\n r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))\r\n depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids,\r\n :]\r\n # normal by 3d neighbor plane fitting.\r\n normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)\r\n if os.path.exists(normal_path):\r\n # normal = np.load(normal_path)\r\n if os.path.getsize(normal_path) > 1:\r\n normal = np.load(normal_path, encoding='bytes', allow_pickle=True)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n else:\r\n normal = compute_points_normal(local)\r\n np.save(normal_path, normal)\r\n lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)\r\n # build a patch\r\n pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))\r\n nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts))\r\n route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1))\r\n r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)\r\n rc_list = []\r\n # da2d+3d neighbor\r\n if not self.neighbor_da2d is None:\r\n sid, count_crt, count_max = 0, 0, 9999\r\n mask = np.zeros((r_max, c_max))\r\n while len(rc_list) < self.n_pts_per_frame:\r\n # avoid infinite loop\r\n count_crt += 1\r\n if count_crt > count_max:\r\n break\r\n r, c = np.random.randint(0, r_max), np.random.randint(0, c_max)\r\n if depth[r, c] == 0. or mask[r, c] == 1.:\r\n continue\r\n if np.isnan(lclnmlclr[r, c, 3]):\r\n continue\r\n mask[r, c] = 1.\r\n rc_list.append([r, c])\r\n pt_in[sid] = torch.Tensor(\r\n np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))\r\n for lid in range(opt.tree_height - 1):\r\n da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int)\r\n # ppf\r\n pts9d = shift_pts9d(\r\n sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),\r\n lclnmlclr[r, c, :])\r\n cen9d = copy.deepcopy(lclnmlclr[r, c, :])\r\n cen9d[0:3] = np.zeros(3)\r\n ppf7d = make_ppf(pts9d, cen9d) # (N,9), (9,)\r\n ppf7d[np.isnan(ppf7d)] = 0.0\r\n nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0)\r\n # remove background by 3d radius\r\n xyz = pts9d[:, 0:3]\r\n ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid]\r\n nb_ms_in[sid, lid, :, ids_out_of_bound] = 0.\r\n # count\r\n sid += 1\r\n return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))\r\n\r\n\r\n# # debug\r\n# if __name__ == '__main__':\r\n# \tprint('done.')\r\n\r\n" ]
[ [ "torch.zeros", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.random.randint", "numpy.arange", "numpy.linalg.eig", "numpy.stack", "numpy.save", "numpy.load", "numpy.zeros", "numpy.min", "numpy.isnan", "numpy.random.rand", "numpy.logical_and", "numpy.array", "numpy.sum", "torch.Tensor", "numpy.linalg.norm", "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ischrot/scipy_rmt_bsc
[ "1dd8f7f0ee7ac1311ed1735ca6b6025150524418" ]
[ "scipy/optimize/tests/test_linesearch.py" ]
[ "\"\"\"\nTests for line search routines\n\"\"\"\nfrom numpy.testing import (assert_, assert_equal, assert_array_almost_equal,\n assert_array_almost_equal_nulp, assert_warns,\n suppress_warnings)\nimport scipy.optimize.linesearch as ls\nimport scipy.optimize.nonlin as nl #(LS)\nfrom scipy.linalg import norm\nfrom scipy.optimize.linesearch import LineSearchWarning\nimport numpy as np\nfrom copy import deepcopy # (IS)\n\n\ndef assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=\"\"):\n \"\"\"\n Check that strong Wolfe conditions apply\n \"\"\"\n phi1 = phi(s)\n phi0 = phi(0)\n derphi0 = derphi(0)\n derphi1 = derphi(s)\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s\" % (\n s, phi0, phi1, derphi0, derphi1, err_msg)\n\n assert_(phi1 <= phi0 + c1*s*derphi0, \"Wolfe 1 failed: \" + msg)\n assert_(abs(derphi1) <= abs(c2*derphi0), \"Wolfe 2 failed: \" + msg)\n\n\ndef assert_armijo(s, phi, c1=1e-4, err_msg=\"\"):\n \"\"\"\n Check that Armijo condition applies\n \"\"\"\n phi1 = phi(s)\n phi0 = phi(0)\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (s, phi0, phi1, err_msg)\n assert_(phi1 <= (1 - c1*s)*phi0, msg)\n\n###(LS)###\ndef assert_rmt(alpha, dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=\"\"):\n \"\"\"\n Check that RMT condition applies\n \"\"\"\n\n\n parameters = ls.prepare_parameters('rmt',param,jacobian,dx)\n rmt_eta_upper = parameters['rmt_eta_upper']\n rmt_eta_lower = parameters['rmt_eta_lower']\n amin = parameters['amin']\n\n #Step 1: Eval t_dx_omega\n dxbar = jacobian.solve(\n Fx_new\n )\n\n dx_diff = dxbar + (1 - alpha) * dx # note that dx = - J(x_k)^(-1)F(x_k)\n\n nominator = 2 * norm(dx_diff)\n denominator = alpha * norm(dx)\n\n t_dx_omega = nominator / denominator\n\n tester = (rmt_eta_lower <= t_dx_omega and t_dx_omega <= rmt_eta_upper) or (rmt_eta_lower > t_dx_omega and alpha == 1.0)\n\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (alpha, F0, Fx_new, err_msg)\n assert_(tester or (alpha<amin), msg)\n\n\ndef assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg):\n parameters = ls.prepare_parameters('bsc',param, old_jacobian, dx)\n H_lower = parameters['H_lower']\n H_upper = parameters['H_upper']\n amin = parameters['amin']\n\n x_new = x + alpha * dx\n Fx_new = func(x_new)\n jacobian = deepcopy(old_jacobian)\n jacobian.update(\n x_new.copy(),\n Fx_new\n )\n dx_next_it = -jacobian.solve(\n Fx_new\n )\n dx_diff = dx_next_it - dx\n H_prime = alpha * norm(dx_diff)\n\n tester = (H_lower <= H_prime and H_prime <= H_upper) or (H_lower > H_prime and alpha >= 1.0)\n\n msg = \"s = %s; phi(0) = %s; phi(s) = %s; %s\" % (alpha, func(x), Fx_new, err_msg)\n\n assert_(tester or (alpha<amin), msg)\n###(LS)###\n\ndef assert_line_wolfe(x, p, s, f, fprime, **kw):\n assert_wolfe(s, phi=lambda sp: f(x + p*sp),\n derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)\n\n\ndef assert_line_armijo(x, p, s, f, **kw):\n assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)\n\n\ndef assert_fp_equal(x, y, err_msg=\"\", nulp=50):\n \"\"\"Assert two arrays are equal, up to some floating-point rounding error\"\"\"\n try:\n assert_array_almost_equal_nulp(x, y, nulp)\n except AssertionError as e:\n raise AssertionError(\"%s\\n%s\" % (e, err_msg)) from e\n\n\nclass TestLineSearch(object):\n # -- scalar functions; must have dphi(0.) 
< 0\n def _scalar_func_1(self, s):\n self.fcount += 1\n p = -s - s**3 + s**4\n dp = -1 - 3*s**2 + 4*s**3\n return p, dp\n\n def _scalar_func_2(self, s):\n self.fcount += 1\n p = np.exp(-4*s) + s**2\n dp = -4*np.exp(-4*s) + 2*s\n return p, dp\n\n def _scalar_func_3(self, s):\n self.fcount += 1\n p = -np.sin(10*s)\n dp = -10*np.cos(10*s)\n return p, dp\n\n # -- n-d functions\n\n def _line_func_1(self, x):\n self.fcount += 1\n f = np.dot(x, x)\n df = 2*x\n return f, df\n\n def _line_func_2(self, x):\n self.fcount += 1\n f = np.dot(x, np.dot(self.A, x)) + 1\n df = np.dot(self.A + self.A.T, x)\n return f, df\n\n # --\n\n def setup_method(self):\n self.scalar_funcs = []\n self.line_funcs = []\n self.N = 20\n self.fcount = 0\n\n def bind_index(func, idx):\n # Remember Python's closure semantics!\n return lambda *a, **kw: func(*a, **kw)[idx]\n\n for name in sorted(dir(self)):\n if name.startswith('_scalar_func_'):\n value = getattr(self, name)\n self.scalar_funcs.append(\n (name, bind_index(value, 0), bind_index(value, 1)))\n elif name.startswith('_line_func_'):\n value = getattr(self, name)\n self.line_funcs.append(\n (name, bind_index(value, 0), bind_index(value, 1)))\n\n np.random.seed(1234)\n self.A = np.random.randn(self.N, self.N)\n\n def scalar_iter(self):\n for name, phi, derphi in self.scalar_funcs:\n for old_phi0 in np.random.randn(3):\n yield name, phi, derphi, old_phi0\n\n def line_iter(self):\n for name, f, fprime in self.line_funcs:\n k = 0\n while k < 9:\n x = np.random.randn(self.N)\n p = np.random.randn(self.N)\n if np.dot(p, fprime(x)) >= 0:\n # always pick a descent direction\n continue\n k += 1\n old_fv = float(np.random.randn())\n yield name, f, fprime, x, p, old_fv\n\n # -- Generic scalar searches\n\n def test_scalar_search_wolfe1(self):\n c = 0\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n c += 1\n s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),\n old_phi0, derphi(0))\n assert_fp_equal(phi0, phi(0), name)\n assert_fp_equal(phi1, phi(s), name)\n assert_wolfe(s, phi, derphi, err_msg=name)\n\n assert_(c > 3) # check that the iterator really works...\n\n def test_scalar_search_wolfe2(self):\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(\n phi, derphi, phi(0), old_phi0, derphi(0))\n assert_fp_equal(phi0, phi(0), name)\n assert_fp_equal(phi1, phi(s), name)\n if derphi1 is not None:\n assert_fp_equal(derphi1, derphi(s), name)\n assert_wolfe(s, phi, derphi, err_msg=\"%s %g\" % (name, old_phi0))\n\n def test_scalar_search_wolfe2_with_low_amax(self):\n def phi(alpha):\n return (alpha - 5) ** 2\n\n def derphi(alpha):\n return 2 * (alpha - 5)\n\n s, _, _, _ = assert_warns(LineSearchWarning,\n ls.scalar_search_wolfe2, phi, derphi, amax=0.001)\n assert_(s is None)\n\n def test_scalar_search_wolfe2_regression(self):\n # Regression test for gh-12157\n # This phi has its minimum at alpha=4/3 ~ 1.333.\n def phi(alpha):\n if alpha < 1:\n return - 3*np.pi/2 * (alpha - 1)\n else:\n return np.cos(3*np.pi/2 * alpha - np.pi)\n\n def derphi(alpha):\n if alpha < 1:\n return - 3*np.pi/2\n else:\n return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)\n\n s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)\n # Without the fix in gh-13073, the scalar_search_wolfe2\n # returned s=2.0 instead.\n assert_(s < 1.5)\n\n def test_scalar_search_armijo(self):\n for name, phi, derphi, old_phi0 in self.scalar_iter():\n s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))\n assert_fp_equal(phi1, phi(s), name)\n 
assert_armijo(s, phi, err_msg=\"%s %g\" % (name, old_phi0))\n\n ###(LS)###\n ##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt?\n\n def test_line_search_rmt(self):\n #There is at least 1 function R^20->R to be tested, but this leads to s=None\n for name, f, fprime, x, p, old_f in self.line_iter():\n jac = lambda x: fprime(x)\n x0 = nl._as_inexact(x)\n func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()\n x = x0.flatten()\n jacobian = nl.asjacobian(jac)\n jacobian.setup(x.copy(), f(x), func)\n options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}\n #print(\"1: \",f(x),np.shape(fprime(x)))\n s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options)\n #print(\"2: \",p_new, s)\n assert_fp_equal(f_new, x+s*fprime(x), name)\n assert_rmt(s, fprime(x), f(x), f_new, jacobian, options, err_msg=\"%s %g\" % name)\n\n\n def test_line_search_bsc(self):\n #There is at least 1 function R^20->R to be tested, but this leads to s=None\n for name, f, fprime, x, p, old_f in self.line_iter():\n jac = lambda x: fprime(x)\n x0 = nl._as_inexact(x)\n func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()\n x = x0.flatten()\n jacobian = nl.asjacobian(jac)\n jacobian.setup(x.copy(), f(x), func)\n options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}\n #print(\"1: \",f(x),np.shape(dp(x)))\n s, f_new= ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options)\n #print(\"2: \",p_new, s)\n assert_fp_equal(f_new, x+s*fprime(x), name)\n assert_bsc(s, x, fprime(x), func, jacobian, options, err_msg=\"%s %g\" % name)\n ###(LS)###\n \n # -- Generic line searches\n\n def test_line_search_wolfe1(self):\n c = 0\n smax = 100\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,\n g0, f0, old_f,\n amax=smax)\n assert_equal(self.fcount, fc+gc)\n assert_fp_equal(ofv, f(x))\n if s is None:\n continue\n assert_fp_equal(fv, f(x + s*p))\n assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)\n if s < smax:\n c += 1\n assert_line_wolfe(x, p, s, f, fprime, err_msg=name)\n\n assert_(c > 3) # check that the iterator really works...\n\n def test_line_search_wolfe2(self):\n c = 0\n smax = 512\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n with suppress_warnings() as sup:\n sup.filter(LineSearchWarning,\n \"The line search algorithm could not find a solution\")\n sup.filter(LineSearchWarning,\n \"The line search algorithm did not converge\")\n s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,\n g0, f0, old_f,\n amax=smax)\n assert_equal(self.fcount, fc+gc)\n assert_fp_equal(ofv, f(x))\n assert_fp_equal(fv, f(x + s*p))\n if gv is not None:\n assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)\n if s < smax:\n c += 1\n assert_line_wolfe(x, p, s, f, fprime, err_msg=name)\n assert_(c > 3) # check that the iterator really works...\n\n def test_line_search_wolfe2_bounds(self):\n # See gh-7475\n\n # For this f and p, starting at a point on axis 0, the strong Wolfe\n # condition 2 is met if and only if the step length s satisfies\n # |x + s| <= c2 * |x|\n f = lambda x: np.dot(x, x)\n fp = lambda x: 2 * x\n p = np.array([1, 0])\n\n # Smallest s satisfying strong Wolfe conditions for these arguments is 30\n x = -60 * p\n c2 = 0.5\n\n s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)\n 
assert_line_wolfe(x, p, s, f, fp)\n\n s, _, _, _, _, _ = assert_warns(LineSearchWarning,\n ls.line_search_wolfe2, f, fp, x, p,\n amax=29, c2=c2)\n assert_(s is None)\n\n # s=30 will only be tried on the 6th iteration, so this won't converge\n assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,\n c2=c2, maxiter=5)\n\n def test_line_search_armijo(self):\n c = 0\n for name, f, fprime, x, p, old_f in self.line_iter():\n f0 = f(x)\n g0 = fprime(x)\n self.fcount = 0\n s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)\n c += 1\n assert_equal(self.fcount, fc)\n assert_fp_equal(fv, f(x + s*p))\n assert_line_armijo(x, p, s, f, err_msg=name)\n assert_(c >= 9)\n\n # -- More specific tests\n\n def test_armijo_terminate_1(self):\n # Armijo should evaluate the function only once if the trial step\n # is already suitable\n count = [0]\n\n def phi(s):\n count[0] += 1\n return -s + 0.01*s**2\n s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)\n assert_equal(s, 1)\n assert_equal(count[0], 2)\n assert_armijo(s, phi)\n\n def test_wolfe_terminate(self):\n # wolfe1 and wolfe2 should also evaluate the function only a few\n # times if the trial step is already suitable\n\n def phi(s):\n count[0] += 1\n return -s + 0.05*s**2\n\n def derphi(s):\n count[0] += 1\n return -1 + 0.05*2*s\n\n for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:\n count = [0]\n r = func(phi, derphi, phi(0), None, derphi(0))\n assert_(r[0] is not None, (r, func))\n assert_(count[0] <= 2 + 2, (count, func))\n assert_wolfe(r[0], phi, derphi, err_msg=str(func))\n" ]
[ [ "numpy.dot", "scipy.optimize.nonlin.asjacobian", "scipy.optimize.linesearch.line_search_armijo", "numpy.random.randn", "numpy.exp", "numpy.testing.assert_equal", "scipy.optimize.nonlin._array_like", "scipy.optimize.linesearch.scalar_search_wolfe2", "numpy.testing.suppress_warnings", "scipy.optimize.linesearch.line_search_wolfe1", "numpy.sin", "scipy.optimize.nonlin._as_inexact", "scipy.linalg.norm", "scipy.optimize.linesearch.line_search_wolfe2", "scipy.optimize.linesearch.prepare_parameters", "numpy.testing.assert_array_almost_equal_nulp", "numpy.testing.assert_", "numpy.array", "numpy.testing.assert_warns", "numpy.random.seed", "numpy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lxchen2019/Python-Baseball
[ "0498830e92c67de8221aac1777651ae141df0ec6" ]
[ "stats/attendance.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom data import games\n\nattendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]\nattendance.columns = ['year', 'attendance']\n\nattendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])\nattendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar')\n\nplt.xlabel('Year')\nplt.ylabel('Attendance')\nplt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "pandas.to_numeric", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
xinglu/Tensorflow-2.0-Computer-Vision-Cookbook
[ "92ea6713f664cff9eccaaccea8ac756f808e2066", "92ea6713f664cff9eccaaccea8ac756f808e2066" ]
[ "ch1/recipe4/load_save_model.py", "ch1/recipe2/load_image_keras.py" ]
[ "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import *\n\n\ndef load_data():\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n # Normalize data.\n X_train = X_train.astype('float32') / 255.0\n X_test = X_test.astype('float32') / 255.0\n\n # Reshape grayscale to include channel dimension.\n X_train = np.expand_dims(X_train, axis=3)\n X_test = np.expand_dims(X_test, axis=3)\n\n # Process labels.\n label_binarizer = LabelBinarizer()\n y_train = label_binarizer.fit_transform(y_train)\n y_test = label_binarizer.fit_transform(y_test)\n\n return X_train, y_train, X_test, y_test\n\n\ndef build_network():\n input_layer = Input(shape=(28, 28, 1), name='input_layer')\n convolution_1 = Conv2D(kernel_size=(2, 2),\n padding='same',\n strides=(2, 2),\n filters=32,\n name='convolution_1')(input_layer)\n activation_1 = ReLU(name='activation_1')(convolution_1)\n batch_normalization_1 = BatchNormalization(name='batch_normalization_1')(activation_1)\n pooling_1 = MaxPooling2D(pool_size=(2, 2),\n strides=(1, 1),\n padding='same',\n name='pooling_1')(batch_normalization_1)\n dropout = Dropout(rate=0.5, name='dropout')(pooling_1)\n\n flatten = Flatten(name='flatten')(dropout)\n dense_1 = Dense(units=128, name='dense_1')(flatten)\n activation_2 = ReLU(name='activation_2')(dense_1)\n dense_2 = Dense(units=10, name='dense_2')(activation_2)\n output = Softmax(name='output')(dense_2)\n\n network = Model(inputs=input_layer, outputs=output, name='my_model')\n\n return network\n\n\ndef evaluate(model, X_test, y_test):\n _, accuracy = model.evaluate(X_test, y_test, verbose=0)\n print(f'Accuracy: {accuracy}')\n\n\nprint('Loading and pre-processing data.')\nX_train, y_train, X_test, y_test = load_data()\n\n# Split dataset.\nX_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8)\n\n# Build network.\nmodel = build_network()\n\n# Compile and train model.\nprint('Training network...')\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=40, batch_size=1024)\n\nprint('Saving model and weights as HDF5.')\nmodel.save('model_and_weights.hdf5')\n\nprint('Loading model and weights as HDF5.')\nloaded_model = load_model('model_and_weights.hdf5')\n\nprint('Evaluating using loaded model.')\nevaluate(loaded_model, X_test, y_test)\n", "import glob\nimport os\nimport tarfile\n\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.preprocessing.image import *\nfrom tensorflow.keras.utils import get_file\n\nDATASET_URL = 'https://datashare.is.ed.ac.uk/bitstream/handle/10283/3192/CINIC-10.tar.gz?sequence=4&isAllowed=y'\nDATA_NAME = 'cinic10'\nFILE_EXTENSION = 'tar.gz'\nFILE_NAME = '.'.join([DATA_NAME, FILE_EXTENSION])\n\n# Downloading the data.\ndownloaded_file_location = get_file(origin=DATASET_URL, fname=FILE_NAME, extract=False)\n\ndata_directory, _ = downloaded_file_location.rsplit(os.path.sep, maxsplit=1)\ndata_directory = os.path.sep.join([data_directory, DATA_NAME])\ntar = tarfile.open(downloaded_file_location)\n\nif not os.path.exists(data_directory):\n tar.extractall(data_directory)\n\nprint(f'Data downloaded to {data_directory}')\ndata_pattern = os.path.sep.join([data_directory, '*/*/*.png'])\n\nimage_paths = list(glob.glob(data_pattern))\nprint(f'Sample image path: {image_paths[0]}')\n\n# 
Load a single image\nsample_image = load_img(image_paths[0])\nprint(f'Image type: {type(sample_image)}')\nprint(f'Image format: {sample_image.format}')\nprint(f'Image mode: {sample_image.mode}')\nprint(f'Image size: {sample_image.size}')\n\n# Convert image to array\nsample_image_array = img_to_array(sample_image)\nprint(f'Image array shape: {sample_image_array.shape}')\nplt.imshow(sample_image_array / 255.0)\n\n# Load a a batch of images.\nscale_factor = 1.0 / 255.0\nimage_generator = ImageDataGenerator(horizontal_flip=True, rescale=scale_factor)\n\niterator = (image_generator\n .flow_from_directory(directory=data_directory,\n batch_size=10))\nfor batch, _ in iterator:\n plt.figure(figsize=(5, 5))\n for index, image in enumerate(batch, start=1):\n ax = plt.subplot(5, 5, index)\n plt.imshow(image)\n plt.axis('off')\n\n plt.show()\n break\n" ]
[ [ "numpy.expand_dims", "sklearn.preprocessing.LabelBinarizer", "sklearn.model_selection.train_test_split", "tensorflow.keras.datasets.mnist.load_data" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplot", "tensorflow.keras.utils.get_file", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
hadi-gharibi/pupil
[ "9d266572cc1ebf659e87206be6e5f1548959d510" ]
[ "pupil/models/clustering.py" ]
[ "from abc import ABC, abstractmethod\nfrom typing import Dict, Protocol, Tuple\n\nimport faiss\nimport numpy as np\nfrom pupil.types import NDArray2D\nfrom sklearn.cluster import AgglomerativeClustering\n\n\nclass Clustering(Protocol):\n n_clusters: int\n\n def fit(self, X: NDArray2D):\n ...\n\n def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n\n ...\n\n def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n \"\"\"After having the center of your clusters, you can use this function to see the distance from X and center of all clusters\n\n Args:\n X (NDArray2D): The input to check.\n\n Returns:\n Tuple[NDArray2D, NDArray2D]: Return (Distances, cluster_ids). Shape of each: (#queries, #clusters)\n \"\"\"\n ...\n\n\nclass FaissKMeansClustering:\n def __init__(\n self,\n n_clusters: int,\n n_init: int = 10,\n max_iter: int = 100,\n ) -> None:\n self.n_clusters = n_clusters\n self.n_init = n_init\n self.max_iter = max_iter\n self.cluster_centers_ = None\n self.inertia_ = None\n\n def fit(self, X: NDArray2D) -> None:\n self.kmeans = faiss.Kmeans(\n d=X.shape[1],\n k=self.n_clusters,\n niter=self.max_iter,\n nredo=self.n_init,\n )\n X = X / np.linalg.norm(X)\n self.kmeans.train(X.astype(np.float32))\n self.cluster_centers_ = self.kmeans.centroids\n self.inertia_ = self.kmeans.obj[-1]\n\n def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n X = X / np.linalg.norm(X)\n return self.kmeans.index.search(X.astype(np.float32), 1) # type: ignore\n\n def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:\n X = X / np.linalg.norm(X)\n D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters) # type: ignore\n return D, I\n\n\nclass Splitter(Protocol):\n def fit(self, X: NDArray2D, clsuter_inds: NDArray2D):\n ...\n\n @property\n def splits(\n self,\n ):\n ...\n\n\nclass Distance1DSplitter:\n def __init__(self, nsplits=3):\n self.nsplits = nsplits\n\n def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None:\n self.clsuter_inds = clsuter_inds\n self.alg = AgglomerativeClustering(n_clusters=self.nsplits)\n self.alg.fit(X.reshape((-1, 1)))\n self._tag_to_index_dict = self._tag_to_index()\n\n def _tag_to_index(self) -> Dict[str, Tuple[int, int]]:\n tags = [\"priority_\" + str(i) for i in range(self.nsplits)]\n\n inds = np.argwhere(np.diff(self.alg.labels_) != 0).flatten().tolist()\n inds.insert(0, -1)\n inds.append(len(self.alg.labels_))\n\n tag_dict = {}\n for i, end in enumerate(inds[1:]):\n start = inds[i] + 1\n tag_dict[tags[i]] = (start, end + 1)\n return tag_dict\n\n @property\n def splits(self):\n res = {}\n for k, v in self._tag_to_index_dict.items():\n res[k] = self.clsuter_inds[0][v[0] : v[1]]\n return res\n" ]
[ [ "sklearn.cluster.AgglomerativeClustering", "numpy.diff", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
h1-the-swan/paper_collection
[ "f07ad5cd8c40ddd75df2031b15c49eee60f1d914" ]
[ "tests/test_paper_collection.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"Tests for `paper_collection` package.\"\"\"\n\n\nimport unittest\n\nfrom paper_collection import paper_collection\n\nimport pandas as pd\nimport numpy as np\n\n\nclass TestPaper_collection(unittest.TestCase):\n \"\"\"Tests for `paper_collection` package.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test fixtures, if any.\"\"\"\n self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\\t')\n self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True)\n self.num_papers = len(self.df_papers)\n self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\\t')\n self.num_citations = len(self.df_citations)\n self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\\t')\n self.authors_by_paper = self.get_authors_by_paper(self.df_authors)\n\n def tearDown(self):\n \"\"\"Tear down test fixtures, if any.\"\"\"\n\n def get_authors_by_paper(self, df_authors):\n \"\"\"Get a dictionary mapping paper_id to author data\n\n \"\"\"\n author_data = {}\n for paper_id, group in df_authors.groupby('PaperId'):\n group = group.sort_values('AuthorSequenceNumber')\n this_authors = []\n for _, row in group.iterrows():\n this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId})\n author_data[paper_id] = this_authors\n return author_data\n\n def load_paper(self, prow):\n paper_id = prow.PaperId\n authors = self.authors_by_paper[paper_id]\n return paper_collection.Paper(dataset='mag',\n dataset_version='mag-2019-11-22',\n paper_id=paper_id,\n title=prow.PaperTitle,\n display_title=prow.OriginalTitle,\n doi=prow.Doi,\n pub_date=prow.Date,\n year=prow.Year,\n venue=prow.OriginalVenue,\n authors=authors,\n node_rank=prow.flow)\n\n def test_000_single_paper(self):\n \"\"\"Load a single paper\"\"\"\n prow = self.df_papers.iloc[0]\n p = self.load_paper(prow)\n assert p.display_title is not None\n assert len(p.display_title)\n\n def test_001_collection(self):\n \"\"\"Load a collection\"\"\"\n coll = paper_collection.PaperCollection(description=\"Paper Collection\")\n for _, prow in self.df_papers.iterrows():\n p = self.load_paper(prow)\n coll.papers.append(p)\n assert len(coll) == self.num_papers\n\n def test_002_graph(self):\n \"\"\"Construct graph\"\"\"\n coll = paper_collection.PaperCollection(description=\"Paper Collection\")\n for _, prow in self.df_papers.iterrows():\n p = self.load_paper(prow)\n coll.papers.append(p)\n for _, row in self.df_citations.iterrows():\n coll.citations.append((row.PaperId, row.PaperReferenceId))\n G = coll.construct_graph()\n assert G.number_of_nodes() == self.num_papers\n assert G.number_of_edges() == self.num_citations\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mindspore-ai/mindspore
[ "a9fbb25530a2874166ff0045ddcdfc73207bf5eb", "a9fbb25530a2874166ff0045ddcdfc73207bf5eb", "a9fbb25530a2874166ff0045ddcdfc73207bf5eb", "a9fbb25530a2874166ff0045ddcdfc73207bf5eb" ]
[ "mindspore/nn/optim/thor.py", "mindspore/dataset/engine/datasets.py", "tests/st/pynative/test_tensor_getitem.py", "tests/st/ops/graph_kernel/custom/test_custom_akg.py" ]
[ "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"thor\"\"\"\nimport numpy as np\nfrom mindspore.ops import functional as F, composite as C, operations as P\nfrom mindspore.common.initializer import initializer\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nfrom mindspore.common.tensor import Tensor\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nimport mindspore.log as logger\nfrom mindspore._checkparam import Validator\nfrom mindspore.nn.optim.optimizer import Optimizer\nfrom mindspore.parallel._utils import _get_device_num, _get_gradients_mean\nfrom mindspore import context\nfrom mindspore.context import ParallelMode\nfrom mindspore.nn.layer import DenseThor, Conv2dThor, EmbeddingThor, EmbeddingLookupThor\nfrom mindspore.nn.wrap import DistributedGradReducer\nfrom mindspore.train.train_thor.convert_utils import ConvertNetUtils\nfrom mindspore.parallel._auto_parallel_context import auto_parallel_context\n\n\n# Enumerates types of Layer\nOther = -1\nConv = 1\nFC = 2\nEmbedding = 3\nLayerNorm = 4\nBatchNorm = 5\n\nop_add = P.AddN()\napply_decay = C.MultitypeFuncGraph(\"apply_decay\")\n_momentum_opt = C.MultitypeFuncGraph(\"momentum_opt\")\n\n\n@apply_decay.register(\"Number\", \"Bool\", \"Tensor\", \"Tensor\")\ndef _tensor_apply_decay(weight_decay, if_apply, weight, gradient):\n \"\"\"Get grad with weight_decay.\"\"\"\n if if_apply:\n return op_add((weight * weight_decay, gradient))\n return gradient\n\n\n@_momentum_opt.register(\"Function\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\", \"Tensor\")\ndef _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):\n \"\"\"Apply momentum optimizer to the weight parameter using Tensor.\"\"\"\n success = True\n success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum))\n return success\n\nIS_ENABLE_GLOBAL_NORM = False\nGRADIENT_CLIP_TYPE = 1\nGRADIENT_CLIP_VALUE = 1.0\nclip_grad = C.MultitypeFuncGraph(\"clip_grad\")\nhyper_map_op = C.HyperMap()\n\n\n@clip_grad.register(\"Number\", \"Number\", \"Tensor\")\ndef _clip_grad(clip_type, clip_value, grad):\n \"\"\"\n Clip gradients.\n\n Inputs:\n clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.\n clip_value (float): Specifies how much to clip.\n grad (tuple[Tensor]): Gradients.\n\n Outputs:\n tuple[Tensor], clipped gradients.\n \"\"\"\n if clip_type not in [0, 1]:\n return grad\n dt = F.dtype(grad)\n if clip_type == 0:\n new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),\n F.cast(F.tuple_to_array((clip_value,)), dt))\n else:\n new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))\n return new_grad\n\n\ndef clip_gradient(enable_clip_grad, gradients):\n \"\"\"clip gradients\"\"\"\n if enable_clip_grad:\n if IS_ENABLE_GLOBAL_NORM:\n gradients = C.clip_by_global_norm(gradients, GRADIENT_CLIP_VALUE, None)\n else:\n gradients 
= hyper_map_op(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), gradients)\n return gradients\n\nC0 = 16\n\n\ndef _check_param(momentum, frequency, lr, cls_name):\n \"\"\"Check param.\"\"\"\n Validator.check_value_type(\"momentum\", momentum, [float], cls_name)\n if isinstance(momentum, float) and momentum < 0.0:\n raise ValueError(\"momentum should be at least 0.0, but got momentum {}\".format(momentum))\n Validator.check_value_type(\"frequency\", frequency, [int], cls_name)\n if isinstance(frequency, int) and frequency < 2:\n raise ValueError(\"frequency should be at least 2, but got frequency {}\".format(frequency))\n Validator.check_value_type(\"learning rate\", lr, [Tensor], cls_name)\n\n\ndef caculate_device_shape(matrix_dim, channel, is_a):\n if is_a:\n if channel // C0 == 0:\n matrix_dim = (matrix_dim / channel) * C0\n ll = (int(matrix_dim // C0), int(matrix_dim // C0), C0, C0), int(matrix_dim)\n return ll\n\n\ndef is_conv_matmul_support_shape(matrix_a_shape, matrix_g_shape):\n \"\"\"is conv layer matmul support shape\"\"\"\n temp = (matrix_g_shape, matrix_a_shape)\n support_shape = [((4, 4, 16, 16), (49, 49, 16, 16)),\n ((4, 4, 16, 16), (4, 4, 16, 16)),\n ((4, 4, 16, 16), (36, 36, 16, 16)),\n ((16, 16, 16, 16), (4, 4, 16, 16)),\n ((4, 4, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (72, 72, 16, 16)),\n ((32, 32, 16, 16), (8, 8, 16, 16)),\n ((32, 32, 16, 16), (16, 16, 16, 16)),\n ((8, 8, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (144, 144, 16, 16)),\n ((64, 64, 16, 16), (16, 16, 16, 16)),\n ((64, 64, 16, 16), (32, 32, 16, 16)),\n ((16, 16, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (288, 288, 16, 16)),\n ((128, 128, 16, 16), (32, 32, 16, 16)),\n ((128, 128, 16, 16), (64, 64, 16, 16)),\n ((32, 32, 16, 16), (128, 128, 16, 16))]\n if temp in support_shape:\n return True\n return False\n\n\ndef caculate_matmul_shape(matrix_a_dim, matrix_g_dim, split_dim):\n \"\"\"get matmul shape\"\"\"\n split_dima = split_dim\n split_dimg = split_dim\n if matrix_a_dim % split_dim == 0:\n batch_w = matrix_a_dim // split_dim\n else:\n if matrix_a_dim < split_dim:\n batch_w = 1\n split_dima = matrix_a_dim\n else:\n batch_w = matrix_a_dim // split_dim + 1\n\n if matrix_g_dim % split_dim == 0:\n batch_h = matrix_g_dim // split_dim\n else:\n if matrix_g_dim < split_dim:\n batch_h = 1\n split_dimg = matrix_g_dim\n else:\n batch_h = matrix_g_dim // split_dim + 1\n matrix_a_shape = (batch_h, batch_w, split_dima, split_dima)\n matrix_g_shape = (batch_h, split_dimg, split_dimg)\n return matrix_a_shape, matrix_g_shape\n\n\ndef get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map):\n \"\"\"get layer type for dense layer and conv layer\"\"\"\n if subcell.weight.requires_grad:\n if \"rpn_with_loss.rpn_convs_list.\" not in prefix.lower() \\\n or \"rpn_with_loss.rpn_convs_list.0.\" in prefix.lower():\n layertype_map.append(Other)\n\n\ndef find_net_layertype_recur(net, layertype_map):\n \"\"\"get net layer type recursively.\"\"\"\n cells = net.name_cells()\n for name in cells:\n subcell = cells[name]\n prefix = subcell.param_prefix\n if subcell == net:\n continue\n elif isinstance(subcell, Conv2dThor):\n layertype_map.append(Conv)\n elif isinstance(subcell, DenseThor):\n layertype_map.append(FC)\n elif isinstance(subcell, (EmbeddingThor, EmbeddingLookupThor)):\n layertype_map.append(Embedding)\n elif isinstance(subcell, nn.LayerNorm):\n 
layertype_map.append(LayerNorm)\n elif isinstance(subcell, nn.BatchNorm2d):\n if subcell.gamma.requires_grad:\n layertype_map.append(BatchNorm)\n elif isinstance(subcell, (nn.Conv2d, nn.Dense, nn.Embedding, nn.Conv2dTranspose, nn.Conv1d, nn.Conv1dTranspose,\n nn.BatchNorm1d, nn.GroupNorm, nn.GlobalBatchNorm)):\n if isinstance(subcell, (nn.Dense, nn.Conv2d)):\n get_layer_type_for_dense_and_conv(subcell, prefix, layertype_map)\n else:\n layertype_map.append(Other)\n else:\n find_net_layertype_recur(subcell, layertype_map)\n\n\ndef get_net_layertype_mask(net):\n layertype_map = []\n find_net_layertype_recur(net, layertype_map)\n return layertype_map\n\n\ndef get_layer_counter(layer_type, layer_counter, params, idx):\n \"\"\"get layer counter\"\"\"\n if layer_type in [Conv, FC]:\n if \"bias\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n else:\n if idx < len(params) - 1 and \"bias\" not in params[idx + 1].name.lower():\n layer_counter = layer_counter + 1\n elif layer_type in [LayerNorm, BatchNorm]:\n if \"beta\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n else:\n if \"bias\" in params[idx].name.lower():\n layer_counter = layer_counter + 1\n elif \"weight\" in params[idx].name.lower():\n if idx < len(params) - 1 and \"bias\" not in params[idx + 1].name.lower():\n layer_counter = layer_counter + 1\n else:\n layer_counter = layer_counter + 1\n return layer_counter\n\n\ndef thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False,\n frequency=100):\n r\"\"\"\n Updates gradients by second-order algorithm--THOR.\n\n Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation (THOR) algorithm is proposed in:\n\n `THOR: Trace-based Hardware-driven layer-ORiented Natural Gradient Descent Computation\n <https://www.aaai.org/AAAI21Papers/AAAI-6611.ChenM.pdf>`_\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n A_i = a_i{a_i}^T \\\\\n G_i = D_{s_i}{ D_{s_i}}^T \\\\\n m_i = \\beta * m_i + ({G_i^{(k)}}+\\lambda I)^{-1}) g_i ({\\overline A_{i-1}^{(k)}}+\\lambda I)^{-1} \\\\\n w_i = w_i - \\alpha * m_i \\\\\n \\end{array}\n\n :math:`D_{s_i}` represents the derivative of the loss function of the output of the i-th layer,\n :math:`a_{i-1}` represents the input of i-th layer,and which is the activations of previous layer,\n :math:`\\beta` represents momentum, :math:`I` represents the identity matrix,\n :math:`\\overline A` represents the transpose of matrix A,\n :math:`\\lambda` represents 'damping', :math:`g_i` represents gradients of the i-th layer,\n :math:`\\otimes` represents Kronecker product, :math:`\\alpha` represents 'learning rate'\n\n Note:\n When separating parameter groups, the weight decay in each group will be applied on the parameters if the\n weight decay is positive. 
When not separating parameter groups, the `weight_decay` in the API will be applied\n on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.\n\n When separating parameter groups, if you want to centralize the gradient, set grad_centralization to True,\n but the gradient centralization can only be applied to the parameters of the convolution layer.\n If the parameters of the non convolution layer are set to True, an error will be reported.\n\n To improve parameter groups performance, the customized order of parameters can be supported.\n\n Args:\n net (Cell): The training network.\n\n learning_rate (Tensor): A value for the learning rate.\n\n damping (Tensor): A value for the damping.\n\n momentum (float): Hyper-parameter of type float, means momentum for the moving average. It must be at least 0.0.\n\n weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0.\n\n loss_scale (float): A value for the loss scale. It must be greater than 0.0. In general, use the\n default value. Default: 1.0.\n\n batch_size (int): The size of a batch. Default: 32\n\n use_nesterov (bool): Enable Nesterov momentum. Default: False.\n\n decay_filter (function): A function to determine which layers the weight decay applied to. And it\n only works when the weight_decay > 0. Default: lambda x: x.name not in []\n\n split_indices (list): Set allreduce fusion strategy by A/G layer indices . Only works when distributed\n computing. ResNet50 as an example, there are 54 layers of A/G respectively, when split_indices is set\n to [26, 53], it means A/G is divided into two groups to allreduce, one is 0~26 layer, and the other\n is 27~53. Default: None\n\n enable_clip_grad (bool): Whether to clip the gradients. Default: False\n\n frequency(int): The update interval of A/G and $A^{-1}/G^{-1}$. When frequency equals N (N is greater than 1),\n A/G and $A^{-1}/G^{-1}$ will be updated every N steps, and other steps will use the stale A/G and\n $A^{-1}/G^{-1}$ to update weights. Default: 100.\n\n Inputs:\n - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.\n\n Outputs:\n tuple[bool], all elements are True.\n\n Raises:\n TypeError: If `learning_rate` is not Tensor.\n TypeError: If `loss_scale`,`momentum` or `frequency` is not a float.\n TypeError: If `weight_decay` is neither float nor int.\n TypeError: If `use_nesterov` is not a bool.\n ValueError: If `loss_scale` is less than or equal to 0.\n ValueError: If `weight_decay` or `momentum` is less than 0.\n ValueError: If `frequency` is not int.\n ValueError: If `frequency` is less than 2.\n\n Supported Platforms:\n ``Ascend`` ``GPU``\n\n Examples:\n >>> from mindspore.nn import thor\n >>> from mindspore import Model\n >>> from mindspore import FixedLossScaleManager\n >>> from mindspore.train.callback import LossMonitor\n >>> from mindspore.train.train_thor import ConvertModelUtils\n >>> from mindspore import nn\n >>> from mindspore import Tensor\n >>>\n >>> net = Net()\n >>> dataset = create_dataset()\n >>> temp = Tensor([4e-4, 1e-4, 1e-5, 1e-5], mstype.float32)\n >>> optim = thor(net, learning_rate=temp, damping=temp, momentum=0.9, loss_scale=128, frequency=4)\n >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n >>> loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)\n >>> model = Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=loss_scale, metrics={'acc'},\n ... 
amp_level=\"O2\", keep_batchnorm_fp32=False)\n >>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=optim,\n ... loss_scale_manager=loss_scale, metrics={'acc'},\n ... amp_level=\"O2\", keep_batchnorm_fp32=False)\n >>> loss_cb = LossMonitor()\n >>> model.train(1, dataset, callbacks=loss_cb, sink_size=4, dataset_sink_mode=True)\n\n \"\"\"\n context.set_context(max_call_depth=10000)\n ConvertNetUtils().convert_to_thor_net(net)\n if context.get_context(\"device_target\") == \"Ascend\":\n return ThorAscend(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size, decay_filter,\n split_indices=split_indices, enable_clip_grad=enable_clip_grad, frequency=frequency)\n return ThorGpu(net, learning_rate, damping, momentum, weight_decay, loss_scale, batch_size,\n use_nesterov, decay_filter, split_indices=split_indices, enable_clip_grad=enable_clip_grad,\n frequency=frequency)\n\n\nclass ThorGpu(Optimizer):\n \"\"\"\n ThorGpu\n \"\"\"\n def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n use_nesterov=False, decay_filter=lambda x: x.name not in [], split_indices=None,\n enable_clip_grad=False, frequency=100):\n params = filter(lambda x: x.requires_grad, net.get_parameters())\n super(ThorGpu, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param(momentum, frequency, learning_rate, self.__class__.__name__)\n self.momentum = Parameter(Tensor(momentum, mstype.float32), name=\"momentum\")\n self.params = self.parameters\n self.use_nesterov = Validator.check_bool(use_nesterov)\n self.moments = self.params.clone(prefix=\"moments\", init='zeros')\n self.hyper_map = C.HyperMap()\n self.opt = P.ApplyMomentum(use_nesterov=self.use_nesterov)\n self.net = net\n self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters()))\n self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters()))\n self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters()))\n self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters()))\n self.batch_size = Tensor(batch_size, mstype.float32)\n self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)\n self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)\n self.damping = damping\n self._define_gpu_operator()\n logger.info(\"matrix_a_cov len is {}\".format(len(self.matrix_a_cov)))\n self.thor = True\n self.matrix_a = ()\n self.matrix_g = ()\n self.matrix_a_shape = ()\n self.thor_layer_count = 0\n self.conv_layer_count = 0\n self.weight_fim_idx_map = ()\n self.weight_conv_idx_map = ()\n self.weight_layertype_idx_map = ()\n self._process_matrix_init_and_weight_idx_map(self.net)\n self.matrix_a = ParameterTuple(self.matrix_a)\n self.matrix_g = ParameterTuple(self.matrix_g)\n self.weight_decay = weight_decay\n self.decay_flags = tuple(decay_filter(x) for x in self.parameters)\n self.update_gradient = P.UpdateThorGradient(split_dim=self.split_dim)\n self.enable_clip_grad = enable_clip_grad\n self.frequency = frequency\n self._define_gpu_reducer(split_indices)\n\n def get_frequency(self):\n \"\"\"get thor frequency\"\"\"\n return self.frequency\n\n def _define_gpu_operator(self):\n \"\"\"define gpu operator\"\"\"\n self.transpose = P.Transpose()\n self.shape = P.Shape()\n self.reshape = P.Reshape()\n self.matmul = P.MatMul()\n self.assign = P.Assign()\n self.mul = P.Mul()\n 
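# A hedged NumPy sketch of the per-layer update rule quoted in the thor()
# docstring above: A_i = a_i a_i^T, G_i = D_{s_i} D_{s_i}^T, and the momentum
# buffer accumulates (G_i + damping*I)^-1 g_i (A_i + damping*I)^-1. The
# optimizer itself keeps A/G as layer statistics and inverts them with
# Cholesky-based kernels; np.linalg.inv is used here purely for illustration.
import numpy as np

def thor_like_step(w, g, a_in, ds_out, m, lr=0.1, beta=0.9, damping=0.03):
    A = np.outer(a_in, a_in)                                  # A_i from layer input
    G = np.outer(ds_out, ds_out)                              # G_i from output sensitivity
    a_inv = np.linalg.inv(A + damping * np.eye(A.shape[0]))
    g_inv = np.linalg.inv(G + damping * np.eye(G.shape[0]))
    m = beta * m + g_inv @ g @ a_inv                          # m_i update
    return w - lr * m, m                                      # w_i update

rng = np.random.default_rng(0)
w = rng.normal(size=(4, 3))
g = rng.normal(size=(4, 3))
w, m = thor_like_step(w, g, rng.normal(size=3), rng.normal(size=4), np.zeros((4, 3)))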
self.gather = P.GatherV2()\n self.one = Tensor(1, mstype.int32)\n self.feature_map = Tensor(1.0, mstype.float32)\n self.axis = 0\n self.cov_step = Parameter(initializer(0, [1], mstype.int32), name=\"cov_step\", requires_grad=False)\n self.cast = P.Cast()\n self.sqrt = P.Sqrt()\n self.eye = P.Eye()\n self.split_dim = 128\n self.embedding_cholesky = P.CholeskyTrsm()\n self.cholesky = P.CholeskyTrsm(split_dim=self.split_dim)\n self.vector_matmul = P.BatchMatMul(transpose_a=True)\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.inv = P.Reciprocal()\n self.square = P.Square()\n self.expand = P.ExpandDims()\n\n\n def _define_gpu_reducer(self, split_indices):\n \"\"\"define gpu reducer\"\"\"\n self.parallel_mode = context.get_auto_parallel_context(\"parallel_mode\")\n self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)\n if self.is_distributed:\n mean = _get_gradients_mean()\n degree = _get_device_num()\n if not split_indices:\n self.split_indices = [len(self.matrix_a_cov) - 1]\n else:\n self.split_indices = split_indices\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum6\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum8\")\n self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6)\n self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8)\n\n\n def _process_matrix_init_and_weight_idx_map(self, net):\n \"\"\"for GPU, process matrix init shape, and get weight idx map\"\"\"\n layer_type_map = get_net_layertype_mask(net)\n layer_counter = 0\n for idx in range(len(self.params)):\n layer_type = layer_type_map[layer_counter]\n weight = self.params[idx]\n weight_shape = self.shape(weight)\n if layer_type in [Conv, FC] and \"bias\" not in self.params[idx].name.lower():\n in_channels = weight_shape[1]\n out_channels = weight_shape[0]\n matrix_a_dim = in_channels\n if layer_type == Conv:\n matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3]\n matrix_g_dim = out_channels\n matrix_a_shape, matrix_g_shape = caculate_matmul_shape(matrix_a_dim, matrix_g_dim, self.split_dim)\n matrix_a_inv = Parameter(np.zeros(matrix_a_shape).astype(np.float32),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(np.zeros(matrix_g_shape).astype(np.float32),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n self.matrix_a_shape = self.matrix_a_shape + (matrix_a_shape,)\n elif layer_type == Embedding:\n vocab_size = weight_shape[0]\n embedding_size = weight_shape[1]\n matrix_a_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float32)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float32)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n self.matrix_a_shape = self.matrix_a_shape + ((vocab_size,),)\n\n if layer_type in [Conv, FC, Embedding] and \"bias\" not in self.params[idx].name.lower():\n self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)\n self.thor_layer_count = self.thor_layer_count + 1\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + 
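# Plain-Python restatement of the split rule used by caculate_matmul_shape
# above (split_dim is 128 on GPU): a dimension that is an exact multiple of
# split_dim is divided evenly, a dimension smaller than split_dim is kept as a
# single block of its own size, and anything else is rounded up by one block.
def split_blocks(dim, split_dim=128):
    if dim % split_dim == 0:
        return dim // split_dim, split_dim
    if dim < split_dim:
        return 1, dim
    return dim // split_dim + 1, split_dim

print(split_blocks(64))    # (1, 64)  -> small matrix kept whole
print(split_blocks(256))   # (2, 128) -> exact multiple of the split size
print(split_blocks(300))   # (3, 128) -> rounded up to three 128-wide blocks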
(layer_type,)\n if layer_type == Conv:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)\n self.conv_layer_count = self.conv_layer_count + 1\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)\n if layer_type == LayerNorm:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,)\n else:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,)\n # bert.cls1.output_bias: not a network layer, only a trainable param\n if \"output_bias\" not in self.params[idx].name.lower():\n layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)\n\n def _get_ainv_ginv_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce):\n \"\"\"get matrixA inverse list and matrix G inverse list\"\"\"\n for i in range(len(self.params)):\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type in [Conv, FC, Embedding]:\n g = gradients[i]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n damping_a = damping_step\n damping_g = damping_step\n feature_map = self.feature_map\n if layer_type == Conv:\n a_normalizer = self.a_normalizer[conv_layer_count]\n g_normalizer = self.g_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n g_normalizer = F.depend(g_normalizer, g)\n damping_a = self.mul(damping_step, 1.0 / a_normalizer)\n damping_g = self.mul(damping_step, 1.0 / g_normalizer)\n feature_map = self.sqrt(1.0 / a_normalizer)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n damping_a = self.sqrt(damping_a)\n damping_g = self.sqrt(damping_g)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[1], mstype.float32)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping_g * g_eye\n if layer_type == Embedding:\n a_eye = P.OnesLike()(matrix_a)\n matrix_a = self.mul(matrix_a, 1.0 / self.batch_size)\n matrix_a = matrix_a + damping_a * a_eye\n matrix_a = self.inv(matrix_a)\n matrix_g = self.embedding_cholesky(matrix_g)\n matrix_g = self.matmul(matrix_g, matrix_g)\n else:\n matrix_a = matrix_a + damping_a * a_eye\n matrix_a = self.cholesky(matrix_a)\n matrix_a = self.vector_matmul(matrix_a, matrix_a)\n matrix_a = P.BroadcastTo(self.matrix_a_shape[thor_layer_count])(matrix_a)\n matrix_g = self.cholesky(matrix_g)\n matrix_g = self.vector_matmul(matrix_g, matrix_g)\n matrix_a = self.mul(matrix_a, feature_map)\n matrix_g = self.mul(matrix_g, feature_map)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g,)\n return matrix_a_allreduce, matrix_g_allreduce\n\n def _process_layernorm(self, damping_step, gradient):\n \"\"\"process layernorm\"\"\"\n damping = self.sqrt(damping_step)\n normalizer = self.batch_size\n normalizer = self.cast(normalizer, mstype.float32)\n fim_cov = self.square(gradient)\n fim_cov = self.mul(fim_cov, 1.0 / normalizer)\n fim_cov = fim_cov + damping\n fim_inv = self.inv(fim_cov)\n gradient = self.mul(fim_inv, gradient)\n return gradient\n\n def _reshape_gradient(self, conv_layer_count, g, g_shape):\n \"\"\"reshape 
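# NumPy sketch of the LayerNorm branch handled by _process_layernorm above:
# the Fisher information of gamma/beta is approximated element-wise by the
# squared gradient, so "inverting" it is just an element-wise reciprocal.
import numpy as np

def layernorm_precondition(grad, batch_size, damping_step):
    fim_cov = grad ** 2 / batch_size + np.sqrt(damping_step)
    return grad / fim_cov      # same as fim_inv * grad in the source above

g = np.array([0.2, -0.5, 1.0])
print(layernorm_precondition(g, batch_size=32.0, damping_step=0.03))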
gradient\"\"\"\n if conv_layer_count != -1:\n g = self.reshape(g, g_shape)\n return g\n\n def construct(self, gradients):\n params = self.params\n moments = self.moments\n gradients = self.scale_grad(gradients)\n damping_step = self.gather(self.damping, self.cov_step, self.axis)\n damping_step = self.cast(damping_step, mstype.float32)\n new_grads = ()\n if self.thor:\n matrix_ainv_list = ()\n matrix_ginv_list = ()\n matrix_a_allreduce, matrix_g_allreduce = self._get_ainv_ginv_list(gradients, damping_step,\n matrix_ainv_list, matrix_ginv_list)\n if self.is_distributed:\n matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce)\n matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce)\n\n for i in range(len(self.params)):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type in [Conv, FC]:\n g_shape = self.shape(g)\n g = self.reshape(g, (g_shape[0], -1))\n matrix_a = matrix_a_allreduce[thor_layer_count]\n matrix_g = matrix_g_allreduce[thor_layer_count]\n g = self.update_gradient(matrix_g, g, matrix_a)\n self.assign(self.matrix_a[thor_layer_count], matrix_a)\n self.assign(self.matrix_g[thor_layer_count], matrix_g)\n g = self._reshape_gradient(conv_layer_count, g, g_shape)\n elif layer_type == Embedding:\n matrix_a = matrix_a_allreduce[thor_layer_count]\n matrix_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a[thor_layer_count], matrix_a)\n self.assign(self.matrix_g[thor_layer_count], matrix_g)\n temp_a = self.expand(matrix_a, 1)\n g = self.mul(temp_a, g)\n g = self.matmul(g, matrix_g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n else:\n for j in range(len(self.params)):\n g = gradients[j]\n thor_layer_count = self.weight_fim_idx_map[j]\n conv_layer_count = self.weight_conv_idx_map[j]\n layer_type = self.weight_layertype_idx_map[j]\n if layer_type in [Conv, FC]:\n g_shape = self.shape(g)\n g = self.reshape(g, (g_shape[0], -1))\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n g = self.update_gradient(matrix_g, g, matrix_a)\n g = self._reshape_gradient(conv_layer_count, g, g_shape)\n elif layer_type == Embedding:\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n g = gradients[j]\n temp_a = self.expand(matrix_a, 1)\n g = self.mul(temp_a, g)\n g = self.matmul(g, matrix_g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n gradients = new_grads\n\n self.cov_step = self.cov_step + self.one\n if self.weight_decay > 0:\n gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)\n gradients = clip_gradient(self.enable_clip_grad, gradients)\n lr = self.get_lr()\n success = self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)\n return success\n\n\nclass ThorAscend(Optimizer):\n \"\"\"ThorAscend\"\"\"\n\n def __init__(self, net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0, batch_size=32,\n decay_filter=lambda x: x.name not in [], split_indices=None, enable_clip_grad=False, frequency=100):\n params = filter(lambda x: x.requires_grad, net.get_parameters())\n super(ThorAscend, self).__init__(learning_rate, params, weight_decay, loss_scale)\n _check_param(momentum, frequency, learning_rate, self.__class__.__name__)\n self.momentum = 
Parameter(Tensor(momentum, mstype.float32), name=\"momentum\")\n self.params = self.parameters\n self.moments = self.params.clone(prefix=\"moments\", init='zeros')\n self.hyper_map = C.HyperMap()\n self.opt = P.ApplyMomentum()\n self.net = net\n self.matrix_a_cov = ParameterTuple(filter(lambda x: 'matrix_a' in x.name, net.get_parameters()))\n self.matrix_g_cov = ParameterTuple(filter(lambda x: 'matrix_g' in x.name, net.get_parameters()))\n self.a_normalizer = ParameterTuple(filter(lambda x: 'a_normalizer' in x.name, net.get_parameters()))\n self.g_normalizer = ParameterTuple(filter(lambda x: 'g_normalizer' in x.name, net.get_parameters()))\n logger.info(\"matrix_a_cov len is {}\".format(len(self.matrix_a_cov)))\n self._define_ascend_operator()\n self.C0 = 16\n self.device_shape_pad_flag = ()\n self.diag_block_dim = 128\n self.matrix_a = ()\n self.matrix_g = ()\n self.thor_layer_count = 0\n self.conv_layer_count = 0\n self.weight_conv_idx_map = ()\n self.weight_fim_idx_map = ()\n self.weight_layertype_idx_map = ()\n self.a_split_pad_dim_map = ()\n self.g_split_pad_dim_map = ()\n self.conv_matmul_support_map = ()\n self.batch_matmul_support_list = [1, 2, 4, 5, 6, 8, 9, 16, 18, 24, 32, 36]\n self.abs_max_support_list = [1, 2, 4, 8, 16, 5, 9, 18, 36, 32]\n self._process_matrix_init_and_weight_idx_map(self.net)\n self.matrix_a = ParameterTuple(self.matrix_a)\n self.matrix_g = ParameterTuple(self.matrix_g)\n self.matrix_max_inv = ()\n for i in range(len(self.matrix_a)):\n self.matrix_max_inv = self.matrix_max_inv + (\n Parameter(initializer(1, [1], mstype.float32), name=\"matrix_max\" + str(i), requires_grad=False),)\n self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)\n self.thor = True\n self.weight_decay = weight_decay\n self.decay_flags = tuple(decay_filter(x) for x in self.parameters)\n self.damping = damping\n self.batch_size = Tensor(batch_size, mstype.float32)\n self.loss_scale = Tensor(1 / (loss_scale * loss_scale), mstype.float32)\n self.batch_size_scale = Tensor(batch_size * batch_size, mstype.float32)\n self.enable_clip_grad = enable_clip_grad\n self.frequency = frequency\n self._define_ascend_reducer(split_indices)\n\n\n def get_frequency(self):\n \"\"\"get thor frequency\"\"\"\n return self.frequency\n\n def _get_pad_dim(self, matrix_dim):\n \"\"\"get diag split pad dim \"\"\"\n split_pad_dim = 0\n if matrix_dim == 64:\n return split_pad_dim\n res = matrix_dim % self.diag_block_dim\n if res != 0:\n split_pad_dim = self.diag_block_dim - res\n return split_pad_dim\n\n def _define_ascend_operator(self):\n \"\"\"define ascend operator\"\"\"\n self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()\n self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()\n self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()\n self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()\n self.transpose = P.Transpose()\n self.shape = P.Shape()\n self.reshape = P.Reshape()\n self.mul = P.Mul()\n self.log = P.Log()\n self.exp = P.Exp()\n self.sqrt = P.Sqrt()\n self.gather = P.GatherV2()\n self.assign = P.Assign()\n self.cast = P.Cast()\n self.eye = P.Eye()\n self.concat = P.Concat(0)\n self.cholesky = P.CusCholeskyTrsm()\n self.vector_matmul = P.CusBatchMatMul()\n self.tbe_batch_matmul = P.BatchMatMul(transpose_a=True)\n self.fused_abs_max2 = P.CusFusedAbsMax1()\n self.matrix_combine = P.CusMatrixCombine()\n self.slice = P.Slice()\n self.expand = P.ExpandDims()\n self.reduce_sum = P.ReduceSum(keep_dims=False)\n self.square = P.Square()\n self.inv = P.Inv()\n self.matmul = P.MatMul()\n self.axis = 
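# Sketch of the padding rule in _get_pad_dim above: a matrix dimension is
# padded up to a multiple of the 128-wide diagonal block used by the blocked
# Cholesky kernel, with 64 special-cased to need no padding.
def get_pad_dim(matrix_dim, diag_block_dim=128):
    if matrix_dim == 64:
        return 0
    res = matrix_dim % diag_block_dim
    return diag_block_dim - res if res != 0 else 0

assert get_pad_dim(64) == 0
assert get_pad_dim(256) == 0
assert get_pad_dim(1001) == 23     # 1001 is padded up to 1024 = 8 * 128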
0\n self.one = Tensor(1, mstype.int32)\n self.cov_step = Parameter(initializer(0, [1], mstype.int32), name=\"cov_step\", requires_grad=False)\n\n def _define_ascend_reducer(self, split_indices):\n \"\"\"define ascend reducer\"\"\"\n self.parallel_mode = context.get_auto_parallel_context(\"parallel_mode\")\n self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)\n if self.is_distributed:\n mean = _get_gradients_mean()\n degree = _get_device_num()\n if not split_indices:\n self.split_indices = [len(self.matrix_a_cov) - 1]\n else:\n self.split_indices = split_indices\n if self.conv_layer_count > 0:\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum2\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum4\")\n self.grad_reducer_amax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=2)\n self.grad_reducer_gmax = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=4)\n\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum6\")\n auto_parallel_context().set_all_reduce_fusion_split_indices(self.split_indices, \"hccl_world_groupsum8\")\n self.grad_reducer_a = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=6)\n self.grad_reducer_g = DistributedGradReducer(self.matrix_a_cov, mean, degree, fusion_type=8)\n\n def _get_weight_idx_map(self, layer_type, idx, weight_shape):\n \"\"\"for Ascend, get weight idx map\"\"\"\n if layer_type in [Conv, FC, Embedding] and \"bias\" not in self.params[idx].name.lower():\n self.weight_fim_idx_map = self.weight_fim_idx_map + (self.thor_layer_count,)\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (layer_type,)\n if layer_type == Embedding:\n a_pad_dim = 0\n g_pad_dim = 0\n self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,)\n self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,)\n else:\n out_channels = weight_shape[0]\n g_pad_dim = self._get_pad_dim(out_channels)\n self.g_split_pad_dim_map = self.g_split_pad_dim_map + (g_pad_dim,)\n matrix_a_dim = weight_shape[1]\n if layer_type == Conv:\n matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3]\n a_pad_dim = self._get_pad_dim(matrix_a_dim)\n self.a_split_pad_dim_map = self.a_split_pad_dim_map + (a_pad_dim,)\n\n self.thor_layer_count = self.thor_layer_count + 1\n if layer_type == Conv:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (self.conv_layer_count,)\n self.conv_layer_count = self.conv_layer_count + 1\n else:\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n else:\n self.weight_fim_idx_map = self.weight_fim_idx_map + (-1,)\n self.weight_conv_idx_map = self.weight_conv_idx_map + (-1,)\n if layer_type == LayerNorm:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (LayerNorm,)\n else:\n self.weight_layertype_idx_map = self.weight_layertype_idx_map + (Other,)\n\n def _get_fc_matrix(self, weight_shape):\n \"\"\"for Ascend, get fc matrix_a and matrix_g\"\"\"\n out_channels = weight_shape[0]\n in_channels = weight_shape[1]\n if self.conv_layer_count > 0:\n if out_channels == 1001:\n fc_matrix_a = Parameter(Tensor(np.zeros([128, 128, 16, 16]).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count),\n requires_grad=False)\n fc_matrix_g = Parameter(Tensor(np.zeros([63, 63, 16, 16]).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count),\n requires_grad=False)\n else:\n 
fc_matrix_a = Parameter(Tensor(np.eye(in_channels).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count),\n requires_grad=False)\n fc_matrix_g = Parameter(Tensor(np.eye(out_channels).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count),\n requires_grad=False)\n self.matrix_a = self.matrix_a + (fc_matrix_a,)\n self.matrix_g = self.matrix_g + (fc_matrix_g,)\n\n def _process_matrix_init_and_weight_idx_map(self, net):\n \"\"\"for Ascend, process matrix init shape, and get weight idx map\"\"\"\n layer_counter = 0\n layer_type_map = get_net_layertype_mask(net)\n for idx in range(len(self.params)):\n layer_type = layer_type_map[layer_counter]\n weight = self.params[idx]\n weight_shape = self.shape(weight)\n if layer_type == Conv and \"bias\" not in self.params[idx].name.lower():\n in_channels = weight_shape[1]\n out_channels = weight_shape[0]\n matrix_a_dim = in_channels * weight_shape[2] * weight_shape[3]\n matrix_g_dim = out_channels\n matrix_a_device_shape, matrix_a_device_dim = caculate_device_shape(matrix_a_dim, in_channels, True)\n matrix_g_device_shape, matrix_g_device_dim = caculate_device_shape(matrix_g_dim, in_channels, False)\n ret = is_conv_matmul_support_shape(matrix_a_device_shape, matrix_g_device_shape)\n if ret:\n matrix_a_inv = Parameter(\n Tensor(np.reshape(np.identity(matrix_a_device_dim).astype(np.float16), matrix_a_device_shape)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(\n Tensor(np.reshape(np.identity(matrix_g_device_dim).astype(np.float16), matrix_g_device_shape)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.conv_matmul_support_map = self.conv_matmul_support_map + (1,)\n else:\n matrix_a_inv = Parameter(Tensor(np.eye(matrix_a_dim).astype(np.float16)),\n name='matrix_a_inv_' + str(self.thor_layer_count), requires_grad=False)\n matrix_g_inv = Parameter(Tensor(np.eye(matrix_g_dim).astype(np.float16)),\n name=\"matrix_g_inv_\" + str(self.thor_layer_count), requires_grad=False)\n self.conv_matmul_support_map = self.conv_matmul_support_map + (0,)\n self.matrix_a = self.matrix_a + (matrix_a_inv,)\n self.matrix_g = self.matrix_g + (matrix_g_inv,)\n device_shape_pad_flag = False\n if matrix_a_dim != matrix_a_device_dim:\n device_shape_pad_flag = True\n self.device_shape_pad_flag = self.device_shape_pad_flag + (device_shape_pad_flag,)\n elif layer_type == FC and \"bias\" not in self.params[idx].name.lower():\n self._get_fc_matrix(weight_shape)\n self._get_weight_idx_map(layer_type, idx, weight_shape)\n # bert.cls1.output_bias: not a network layer, only a trainable param\n if \"output_bias\" not in self.params[idx].name.lower():\n layer_counter = get_layer_counter(layer_type, layer_counter, self.params, idx)\n\n def _process_batch_matmul(self, input_matrix):\n \"\"\"process batch matmul\"\"\"\n input_matrix_shape = self.shape(input_matrix)\n if input_matrix_shape[0] in self.batch_matmul_support_list:\n input_matrix = self.vector_matmul(input_matrix, input_matrix)\n else:\n input_matrix = self.tbe_batch_matmul(input_matrix, input_matrix)\n return input_matrix\n\n def _process_cholesky_pad(self, pad_dim, input_matrix, matrix_shape0):\n \"\"\"process cholesky pad\"\"\"\n if pad_dim > 0:\n matrix_sup = self.eye(pad_dim, pad_dim, mstype.float32)\n matrix_sup = P.Pad(((0, 0), (matrix_shape0, 0)))(matrix_sup)\n input_matrix = P.Pad(((0, 0), (0, pad_dim)))(input_matrix)\n input_matrix = self.concat((input_matrix, matrix_sup))\n return input_matrix\n\n\n 
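# NumPy illustration of the trick used by _process_cholesky_pad above: the
# covariance matrix is embedded in the top-left corner of a block-diagonal
# matrix whose padded corner is an identity, so inverting the padded matrix
# and slicing the top-left block recovers the inverse of the original matrix.
import numpy as np

def cholesky_pad_sketch(mat, pad_dim):
    n = mat.shape[0]
    padded = np.eye(n + pad_dim, dtype=mat.dtype)
    padded[:n, :n] = mat               # off-diagonal blocks stay zero
    return padded

a = np.array([[2.0, 0.5], [0.5, 1.0]])
padded = cholesky_pad_sketch(a, 2)
assert np.allclose(np.linalg.inv(padded)[:2, :2], np.linalg.inv(a))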
def _get_abs_max(self, matrix_inv, origin_dim):\n \"\"\"get matrix abs max\"\"\"\n cholesky_shape = self.shape(matrix_inv)\n if cholesky_shape[0] in self.abs_max_support_list:\n matrix_inv_max = P.CusFusedAbsMax1([origin_dim, origin_dim])(matrix_inv)\n matrix_max = self.fused_abs_max2(matrix_inv_max)\n matrix_inv = self.matrix_combine(matrix_inv)\n else:\n matrix_inv = self.matrix_combine(matrix_inv)\n matrix_abs = P.Abs()(matrix_inv)\n matrix_max = P.ReduceMax(keep_dims=False)(matrix_abs)\n return matrix_max, matrix_inv\n\n\n def _get_fc_ainv_ginv(self, index, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce):\n \"\"\"get fc layer ainv and ginv\"\"\"\n thor_layer_count = self.weight_fim_idx_map[index]\n g = gradients[index]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n damping = self.sqrt(damping_step)\n matrix_a = matrix_a + damping * a_eye\n a_pad_dim = self.a_split_pad_dim_map[thor_layer_count]\n matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0])\n matrix_a_inv = self.cholesky(matrix_a)\n matrix_a_inv = self._process_batch_matmul(matrix_a_inv)\n\n weight_shape = self.shape(self.params[index])\n out_channels = weight_shape[0]\n in_channels = weight_shape[1]\n if out_channels == 2:\n matrix_a_inv = self.matrix_combine(matrix_a_inv)\n matrix_g_inv = g_eye\n else:\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping * g_eye\n g_pad_dim = self.g_split_pad_dim_map[thor_layer_count]\n matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0])\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n if self.conv_layer_count > 0:\n a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, in_channels)\n g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels)\n a_max = F.depend(a_max, g)\n g_max = F.depend(g_max, g)\n matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,)\n matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,)\n else:\n matrix_a_inv = self.matrix_combine(matrix_a_inv)\n matrix_g_inv = self.matrix_combine(matrix_g_inv)\n\n if a_pad_dim > 0:\n matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (in_channels, in_channels))\n if g_pad_dim > 0:\n matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))\n matrix_a_inv_shape = self.shape(matrix_a_inv)\n matrix_g_combine_shape = self.shape(matrix_g_inv)\n if matrix_a_inv_shape[0] == 2048 and matrix_g_combine_shape[0] == 1001:\n matrix_a_inv = self.reshape(matrix_a_inv,\n (matrix_a_inv_shape[0] / 16, 16,\n matrix_a_inv_shape[0] / 16, 16))\n matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))\n matrix_g_inv = P.Pad(((0, 7), (0, 7)))(matrix_g_inv)\n\n matrix_g_inv_shape = self.shape(matrix_g_inv)\n matrix_g_inv = self.reshape(matrix_g_inv,\n (matrix_g_inv_shape[0] / 16, 16,\n matrix_g_inv_shape[0] / 16, 16))\n matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))\n\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, 
matrix_g_max_allreduce\n\n def _process_conv_matmul_device_pad(self, conv_layer_count, weight_shape, matrix_a_inv):\n \"\"\"process conv matmul device pad\"\"\"\n if self.device_shape_pad_flag[conv_layer_count]:\n kernel_hw = weight_shape[2] * weight_shape[3]\n in_channels = weight_shape[1]\n matrix_a_inv = self.reshape(matrix_a_inv, (kernel_hw, in_channels, kernel_hw, in_channels))\n matrix_a_inv = P.Pad(((0, 0), (0, self.C0 - in_channels), (0, 0),\n (0, self.C0 - in_channels)))(matrix_a_inv)\n return matrix_a_inv\n\n\n def _get_ainv_ginv_amax_gmax_list(self, gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce):\n \"\"\"get matrixA inverse list, matrixG inverse list, matrixA_max list, matrixG_max list\"\"\"\n for i in range(len(self.params)):\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n weight_shape = self.shape(self.params[i])\n out_channels = weight_shape[0]\n if layer_type == Conv:\n g = gradients[i]\n matrix_a_dim = weight_shape[1] * weight_shape[2] * weight_shape[3]\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n a_shape = self.shape(matrix_a)\n a_eye = self.eye(a_shape[0], a_shape[0], mstype.float32)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n a_normalizer = self.a_normalizer[conv_layer_count]\n g_normalizer = self.g_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n g_normalizer = F.depend(g_normalizer, g)\n damping_a = self.mul(damping_step, self.batch_size / a_normalizer)\n damping_g = self.mul(damping_step, self.batch_size / g_normalizer)\n damping_a = self.sqrt(damping_a)\n matrix_a = matrix_a + damping_a * a_eye\n a_pad_dim = self.a_split_pad_dim_map[thor_layer_count]\n matrix_a = self._process_cholesky_pad(a_pad_dim, matrix_a, a_shape[0])\n matrix_a_inv = self.cholesky(matrix_a)\n matrix_a_inv = self._process_batch_matmul(matrix_a_inv)\n a_max, matrix_a_inv = self._get_abs_max(matrix_a_inv, matrix_a_dim)\n\n damping_g = self.sqrt(damping_g)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping_g * g_eye\n g_pad_dim = self.g_split_pad_dim_map[thor_layer_count]\n matrix_g = self._process_cholesky_pad(g_pad_dim, matrix_g, g_shape[0])\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n g_max, matrix_g_inv = self._get_abs_max(matrix_g_inv, out_channels)\n\n if a_pad_dim > 0:\n matrix_a_inv = self.slice(matrix_a_inv, (0, 0), (matrix_a_dim, matrix_a_dim))\n if g_pad_dim > 0:\n matrix_g_inv = self.slice(matrix_g_inv, (0, 0), (out_channels, out_channels))\n\n if matmul_support_flag == 1:\n matrix_a_inv = self._process_conv_matmul_device_pad(conv_layer_count, weight_shape, matrix_a_inv)\n matrix_a_inv_shape = self.shape(self.matrix_a[thor_layer_count])\n matrix_a_device_temp_shape = (matrix_a_inv_shape[0], matrix_a_inv_shape[2],\n matrix_a_inv_shape[1], matrix_a_inv_shape[3])\n matrix_a_inv = self.reshape(matrix_a_inv, matrix_a_device_temp_shape)\n matrix_a_inv = self.transpose(matrix_a_inv, (2, 0, 1, 3))\n matrix_g_inv_shape = self.shape(self.matrix_g[thor_layer_count])\n matrix_g_device_temp_shape = (matrix_g_inv_shape[0], 
matrix_g_inv_shape[2],\n matrix_g_inv_shape[1], matrix_g_inv_shape[3])\n matrix_g_inv = self.reshape(matrix_g_inv, matrix_g_device_temp_shape)\n matrix_g_inv = self.transpose(matrix_g_inv, (2, 0, 1, 3))\n\n a_max = F.depend(a_max, g)\n g_max = F.depend(g_max, g)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n matrix_a_max_allreduce = matrix_a_max_allreduce + (a_max,)\n matrix_g_max_allreduce = matrix_g_max_allreduce + (g_max,)\n elif layer_type == FC:\n matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \\\n self._get_fc_ainv_ginv(i, damping_step, gradients, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce)\n elif layer_type == Embedding:\n g = gradients[i]\n matrix_a = self.matrix_a_cov[thor_layer_count]\n matrix_g = self.matrix_g_cov[thor_layer_count]\n matrix_a = F.depend(matrix_a, g)\n matrix_g = F.depend(matrix_g, g)\n g_shape = self.shape(matrix_g)\n g_eye = self.eye(g_shape[0], g_shape[0], mstype.float32)\n damping = self.sqrt(damping_step)\n a_eye = P.OnesLike()(matrix_a)\n matrix_a = self.mul(matrix_a, 1.0 / self.batch_size)\n matrix_a = matrix_a + damping * a_eye\n matrix_a_inv = self.inv(matrix_a)\n matrix_g = self.mul(matrix_g, self.loss_scale)\n matrix_g = self.mul(matrix_g, self.batch_size_scale)\n matrix_g = matrix_g + damping * g_eye\n matrix_g_inv = self.cholesky(matrix_g)\n matrix_g_inv = self._process_batch_matmul(matrix_g_inv)\n matrix_g_inv = self.matrix_combine(matrix_g_inv)\n matrix_a_allreduce = matrix_a_allreduce + (matrix_a_inv,)\n matrix_g_allreduce = matrix_g_allreduce + (matrix_g_inv,)\n return matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce\n\n def _process_layernorm(self, damping_step, gradient):\n \"\"\"process layernorm layer for thor\"\"\"\n damping = self.sqrt(damping_step)\n normalizer = self.cast(self.batch_size, mstype.float32)\n fim_cov = self.square(gradient)\n fim_cov = self.mul(fim_cov, 1.0 / normalizer)\n fim_cov = fim_cov + damping\n fim_inv = self.inv(fim_cov)\n gradient = self.mul(fim_inv, gradient)\n return gradient\n\n def _process_thor_fc(self, thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g):\n \"\"\"process thor graph fc layer\"\"\"\n temp_a = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a_cov[thor_layer_count], temp_a)\n self.assign(self.matrix_g_cov[thor_layer_count], temp_g)\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n return g\n\n def _get_second_gradients_one(self, params_len, gradients, new_grads):\n \"\"\"get second gradients one\"\"\"\n for i in range(params_len):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n conv_layer_count = self.weight_conv_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n matrix_a = self.matrix_a[thor_layer_count]\n matrix_g = self.matrix_g[thor_layer_count]\n matrix_max = self.matrix_max_inv[thor_layer_count]\n grad_shape = self.shape(g)\n if layer_type == FC:\n if grad_shape[0] == 1001:\n g = self.cube_matmul_left_fc(matrix_g, g)\n g = self.cube_matmul_right_fc(g, matrix_a, matrix_max)\n else:\n temp_a = self.cast(matrix_a, mstype.float16)\n temp_g = self.cast(matrix_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = 
self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, matrix_max)\n elif layer_type == Conv:\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n if matmul_support_flag == 1:\n g = self.cube_matmul_left(matrix_g, g)\n g = self.cube_matmul_right_mul(g, matrix_a, matrix_max)\n else:\n g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))\n temp_a = self.cast(matrix_a, mstype.float16)\n temp_g = self.cast(matrix_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, matrix_max)\n g = self.reshape(g, grad_shape)\n new_grads = new_grads + (g,)\n return new_grads\n\n def _get_second_gradients(self, new_grads, damping_step, gradients):\n \"\"\"get second gradients for thor\"\"\"\n params_len = len(self.params)\n if self.conv_layer_count > 0:\n new_grads = self._get_second_gradients_one(params_len, gradients, new_grads)\n else:\n for i in range(params_len):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n layer_type = self.weight_layertype_idx_map[i]\n if layer_type == Embedding:\n temp_a_ori = self.matrix_a_cov[thor_layer_count]\n temp_g = self.matrix_g_cov[thor_layer_count]\n temp_a = self.expand(temp_a_ori, 1)\n g = self.mul(temp_a, g)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(g, temp_g)\n g = self.cast(g, mstype.float32)\n elif layer_type == FC:\n temp_a = self.matrix_a_cov[thor_layer_count]\n temp_g = self.matrix_g_cov[thor_layer_count]\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n new_grads = new_grads + (g,)\n return new_grads\n\n def _get_second_grad_by_matmul(self, index, temp_a, temp_g, g, temp_max):\n \"\"\"get second gradient by matmul\"\"\"\n conv_layer_count = self.weight_conv_idx_map[index]\n layer_type = self.weight_layertype_idx_map[index]\n grad_shape = self.shape(g)\n if layer_type == FC:\n if grad_shape[0] == 1001:\n g = self.cube_matmul_left_fc(temp_g, g)\n g = self.cube_matmul_right_fc(g, temp_a, temp_max)\n else:\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, temp_max)\n elif layer_type == Conv:\n a_normalizer = self.a_normalizer[conv_layer_count]\n a_normalizer = F.depend(a_normalizer, g)\n temp_max = self.mul(temp_max, self.batch_size / a_normalizer)\n matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]\n if matmul_support_flag == 1:\n g = self.cube_matmul_left(temp_g, g)\n g = self.cube_matmul_right_mul(g, temp_a, temp_max)\n else:\n g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(temp_g, g)\n g = self.matmul(g, temp_a)\n g = self.cast(g, mstype.float32)\n g = self.mul(g, temp_max)\n g = self.reshape(g, grad_shape)\n return g, temp_max\n\n def _get_second_grad_by_layertype(self, index, matrix_a_allreduce, matrix_g_allreduce, g, damping_step):\n \"\"\"get second 
gradient by layertype\"\"\"\n thor_layer_count = self.weight_fim_idx_map[index]\n layer_type = self.weight_layertype_idx_map[index]\n if layer_type == Embedding:\n temp_a_ori = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n self.assign(self.matrix_a_cov[thor_layer_count], temp_a_ori)\n self.assign(self.matrix_g_cov[thor_layer_count], temp_g)\n temp_a = self.expand(temp_a_ori, 1)\n g = self.mul(temp_a, g)\n temp_g = self.cast(temp_g, mstype.float16)\n g = self.cast(g, mstype.float16)\n g = self.matmul(g, temp_g)\n g = self.cast(g, mstype.float32)\n elif layer_type == FC:\n g = self._process_thor_fc(thor_layer_count, matrix_a_allreduce, matrix_g_allreduce, g)\n elif layer_type == LayerNorm:\n g = self._process_layernorm(damping_step, g)\n return g\n\n def construct(self, gradients):\n params = self.params\n moments = self.moments\n gradients = self.scale_grad(gradients)\n damping_step = self.gather(self.damping, self.cov_step, self.axis)\n damping_step = self.cast(damping_step, mstype.float32)\n if self.thor:\n matrix_a_allreduce = ()\n matrix_g_allreduce = ()\n matrix_a_max_allreduce = ()\n matrix_g_max_allreduce = ()\n matrix_a_allreduce, matrix_g_allreduce, matrix_a_max_allreduce, matrix_g_max_allreduce = \\\n self._get_ainv_ginv_amax_gmax_list(gradients, damping_step, matrix_a_allreduce, matrix_g_allreduce,\n matrix_a_max_allreduce, matrix_g_max_allreduce)\n if self.is_distributed:\n matrix_a_allreduce = self.grad_reducer_a(matrix_a_allreduce)\n matrix_g_allreduce = self.grad_reducer_g(matrix_g_allreduce)\n if self.conv_layer_count > 0:\n matrix_a_max_allreduce = self.grad_reducer_amax(matrix_a_max_allreduce)\n matrix_g_max_allreduce = self.grad_reducer_gmax(matrix_g_max_allreduce)\n\n new_grads = ()\n if self.conv_layer_count > 0:\n for i in range(len(self.params)):\n g = gradients[i]\n thor_layer_count = self.weight_fim_idx_map[i]\n temp_a = matrix_a_allreduce[thor_layer_count]\n temp_g = matrix_g_allreduce[thor_layer_count]\n matrix_a_inv_max = self.log(matrix_a_max_allreduce[thor_layer_count])\n matrix_a_inv_max = self.mul(matrix_a_inv_max, -1)\n matrix_a_inv_max = self.exp(matrix_a_inv_max)\n temp_a = self.mul(temp_a, matrix_a_inv_max)\n matrix_g_inv_max = self.log(matrix_g_max_allreduce[thor_layer_count])\n matrix_g_inv_max = self.mul(matrix_g_inv_max, -1)\n matrix_g_inv_max = self.exp(matrix_g_inv_max)\n temp_g = self.mul(temp_g, matrix_g_inv_max)\n temp_max = self.mul(matrix_g_max_allreduce[thor_layer_count],\n matrix_g_max_allreduce[thor_layer_count])\n temp_a = self.cast(temp_a, mstype.float16)\n temp_g = self.cast(temp_g, mstype.float16)\n g, temp_max = self._get_second_grad_by_matmul(i, temp_a, temp_g, g, temp_max)\n self.assign(self.matrix_a[thor_layer_count], temp_a)\n self.assign(self.matrix_g[thor_layer_count], temp_g)\n self.assign(self.matrix_max_inv[thor_layer_count], temp_max)\n new_grads = new_grads + (g,)\n gradients = new_grads\n else:\n for i in range(len(self.params)):\n g = gradients[i]\n g = self._get_second_grad_by_layertype(i, matrix_a_allreduce, matrix_g_allreduce, g, damping_step)\n new_grads = new_grads + (g,)\n gradients = new_grads\n else:\n new_grads = ()\n gradients = self._get_second_gradients(new_grads, damping_step, gradients)\n\n self.cov_step = self.cov_step + self.one\n if self.weight_decay > 0:\n gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients)\n gradients = clip_gradient(self.enable_clip_grad, gradients)\n lr = self.get_lr()\n success = 
self.hyper_map(F.partial(_momentum_opt, self.opt, self.momentum, lr), gradients, params, moments)\n return success\n", "# Copyright 2019-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nThis dataset module supports various formats of datasets, including ImageNet, TFData,\nMNIST, Cifar10/100, Manifest, MindRecord, and more. This module loads data with\nhigh performance and parses data precisely. Some of the operations that are\nprovided to users to preprocess data include shuffle, batch, repeat, map, and zip.\n\"\"\"\nimport atexit\nimport glob\nimport json\nimport math\nimport os\nimport signal\nimport stat\nimport time\nimport uuid\nimport multiprocessing\nfrom multiprocessing.pool import RUN\nimport queue\nfrom enum import Enum\nfrom functools import partial\nfrom importlib import import_module\nimport sys\nimport threading\n\nimport copy\nimport weakref\nimport platform\nimport psutil\nimport numpy as np\nfrom scipy.io import loadmat\nfrom PIL import Image\n\nimport mindspore._c_dataengine as cde\nfrom mindspore._c_expression import typing\n\nfrom mindspore import Tensor\nfrom mindspore import log as logger\nfrom mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched\nfrom mindspore.parallel._utils import _get_device_num\n\nimport mindspore.dataset.transforms.py_transforms as py_transforms\n\nfrom . 
import samplers\nfrom .iterators import DictIterator, TupleIterator, DummyIterator, check_iterator_cleanup, _set_iterator_cleanup, \\\n ITERATORS_LIST, _unset_iterator_cleanup\nfrom .queue import _SharedQueue\nfrom .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \\\n check_rename, check_numpyslicesdataset, check_device_send, check_take, check_project, check_imagefolderdataset, \\\n check_mnist_cifar_dataset, check_manifestdataset, check_tfrecorddataset, check_vocdataset, check_cocodataset, \\\n check_celebadataset, check_minddataset, check_generatordataset, check_sync_wait, check_zip_dataset, \\\n check_add_column, check_textfiledataset, check_concat, check_random_dataset, check_split, \\\n check_bucket_batch_by_length, check_cluedataset, check_save, check_csvdataset, check_paddeddataset, \\\n check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_flickr_dataset, \\\n check_sb_dataset, check_flowers102dataset, check_cityscapes_dataset, check_usps_dataset, check_div2k_dataset, \\\n check_sbu_dataset, check_qmnist_dataset, check_emnist_dataset, check_fake_image_dataset, check_places365_dataset, \\\n check_photo_tour_dataset\nfrom ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \\\n get_prefetch_size\nfrom ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist\nfrom ..core.validator_helpers import replace_none\nfrom ..core.py_util_helpers import ExceptionHandler\nfrom ..transforms.py_transforms_util import FuncWrapper\n\ntry:\n context = import_module(\"mindspore.context\")\nexcept ModuleNotFoundError:\n context = None\n\n\nclass Shuffle(str, Enum):\n GLOBAL: str = \"global\"\n FILES: str = \"files\"\n INFILE: str = \"infile\"\n\n\nShuffleToShuffleMode = {Shuffle.FILES: cde.ShuffleMode.FILES,\n Shuffle.GLOBAL: cde.ShuffleMode.GLOBAL,\n Shuffle.INFILE: cde.ShuffleMode.INFILE}\n\n\ndef shuffle_to_shuffle_mode(shuffle):\n \"\"\"\n Shuffle Enum to Shuffle Mode\n\n Args:\n shuffle (Shuffle): shuffle flag to shuffle mode in C layer\n\n Returns:\n ShuffleMode, shuffle mode\n \"\"\"\n shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle\n if not isinstance(shuffle, Shuffle):\n if shuffle is None or shuffle:\n shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle\n else:\n shuffle_mode = cde.ShuffleMode.FALSE # No shuffle\n else:\n shuffle_mode = ShuffleToShuffleMode[shuffle]\n return shuffle_mode\n\n\ndef shuffle_to_bool(shuffle):\n \"\"\"\n Shuffle Enum to bool\n\n Args:\n shuffle (Shuffle): shuffle flag to bool\n\n Returns:\n bool, True / False\n \"\"\"\n shuffle_bool = True\n if not isinstance(shuffle, Shuffle):\n if shuffle is None:\n shuffle_bool = None\n elif shuffle:\n shuffle_bool = True\n else:\n shuffle_bool = False\n else:\n shuffle_bool = True\n return shuffle_bool\n\n\n@check_zip\ndef zip(datasets):\n \"\"\"\n Zip the datasets in the input tuple of datasets.\n\n Args:\n datasets (tuple of class Dataset): A tuple of datasets to be zipped together.\n The number of datasets must be more than 1.\n\n Returns:\n ZipDataset, dataset zipped.\n\n Raises:\n ValueError: If the number of datasets is 1.\n TypeError: If datasets is not a tuple.\n\n Examples:\n >>> # Create a dataset which is the combination of dataset_1 and dataset_2\n >>> dataset = ds.zip((dataset_1, dataset_2))\n \"\"\"\n if len(datasets) <= 1:\n raise ValueError(\n \"Can't zip empty or just one dataset!\")\n for dataset in datasets:\n if not isinstance(dataset, 
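# Stand-alone sketch of the normalization done by shuffle_to_shuffle_mode
# above: a bare bool (or None) maps to GLOBAL/FALSE, while a recognized
# shuffle flag is looked up in a table. ShuffleMode below is a stand-in for
# the C++ enum exposed as cde.ShuffleMode, used only for illustration.
from enum import Enum

class ShuffleMode(Enum):
    FALSE = 0
    FILES = 1
    GLOBAL = 2
    INFILE = 3

def to_shuffle_mode(shuffle):
    table = {"files": ShuffleMode.FILES, "global": ShuffleMode.GLOBAL,
             "infile": ShuffleMode.INFILE}
    if isinstance(shuffle, str) and shuffle.lower() in table:
        return table[shuffle.lower()]
    return ShuffleMode.GLOBAL if (shuffle is None or shuffle) else ShuffleMode.FALSE

print(to_shuffle_mode(None))      # ShuffleMode.GLOBAL
print(to_shuffle_mode(False))     # ShuffleMode.FALSE
print(to_shuffle_mode("files"))   # ShuffleMode.FILES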
Dataset):\n raise TypeError(\"Invalid dataset, expected Dataset object, but got %s!\" % type(dataset))\n return ZipDataset(datasets)\n\n\ndef _get_operator_process():\n \"\"\"\n Inner implemented method, mainly for passing sub-process id in C layer\n\n Returns:\n dict, mapping dict of operator id and corresponding process id.\n \"\"\"\n global _OP_PROCESS\n process_info = _OP_PROCESS\n op_process = dict()\n keys = process_info.keys()\n fetched_all = True\n for key in keys:\n op_process[key] = list(process_info[key][1])\n item_full = (len(process_info[key][1]) == process_info[key][0])\n fetched_all = fetched_all and item_full\n return op_process, fetched_all\n\n\ndef _set_dataset_permissions(file_name, num_files):\n \"\"\"\n set saved dataset files' permissions to 600\n the rule of dataset filenames should be the same as those in C++.\n \"\"\"\n num_digits = len(str(num_files - 1))\n if num_files == 1:\n paths = [file_name]\n else:\n paths = [\"{}{}\".format(file_name, str(x).rjust(num_digits, '0')) for x in range(num_files)]\n\n for item in paths:\n if os.path.exists(item):\n os.chmod(item, stat.S_IRUSR | stat.S_IWUSR)\n index_file = item + \".db\"\n if os.path.exists(index_file):\n os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR)\n\n\nclass Dataset:\n \"\"\"\n Abstract class to represent a dataset in DataEngine's data pipeline.\n\n This class is the base class of SourceDataset and Dataset, and represents\n a node in the data flow graph.\n\n Args:\n num_parallel_workers (int, optional): Number of workers to process the dataset in parallel\n (default=None).\n \"\"\"\n\n def __init__(self, children=None, num_parallel_workers=None, cache=None):\n # Note: children and parent are internal variables, not recommended for external using.\n self.children = replace_none(children, [])\n if isinstance(self.children, tuple):\n self.children = list(self.children)\n if not isinstance(self.children, list):\n self.children = [self.children]\n\n self.parent = []\n for child in self.children:\n child.parent.append(weakref.ref(self))\n self.num_parallel_workers = num_parallel_workers\n self.cache = cache\n\n self._device_iter = 0\n self._input_indexs = ()\n self.saved_output_types = None\n self.saved_output_shapes = None\n self.dynamic_setting = [False, None]\n self.saved_min_shapes = None\n self.saved_max_shapes = None\n self._col_names = None\n self.dataset_size = None\n self._batch_size = None\n self._num_classes = None\n self._repeat_count = None\n self._class_indexing = None\n self._sync = False\n\n def create_ir_tree(self):\n \"\"\"\n Internal method to build an IR tree.\n\n Returns:\n DatasetNode, the root node of the IR tree.\n Dataset, the root dataset of the IR tree.\n \"\"\"\n parent = self.parent\n self.parent = []\n dataset = copy.deepcopy(self)\n global _OP_NAME\n _OP_NAME = Dataset._get_operator_id(dataset)\n ir_tree = dataset.parse_tree()\n self.parent = parent\n _init_device_info()\n return ir_tree, dataset\n\n def close_pool(self):\n \"\"\"\n Close multiprocessing pool in dataset. 
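# Sketch of the shard-naming rule used by _set_dataset_permissions above: with
# more than one output file, a zero-padded shard index is appended to the base
# name before the 600 permission is applied. The file name is hypothetical.
def shard_names(file_name, num_files):
    if num_files == 1:
        return [file_name]
    width = len(str(num_files - 1))
    return ["{}{}".format(file_name, str(i).rjust(width, "0")) for i in range(num_files)]

print(shard_names("imagenet.mindrecord", 12)[:3])
# ['imagenet.mindrecord00', 'imagenet.mindrecord01', 'imagenet.mindrecord02']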
If you are familiar with multiprocessing library, you can regard this\n as a destructor for a processingPool object.\n \"\"\"\n if hasattr(self, 'process_pool') and self.process_pool is not None:\n self.process_pool.close()\n for child in self.children:\n child.close_pool()\n\n def notify_watchdog(self):\n if hasattr(self, 'sample_fn') and self.sample_fn is not None:\n if self.sample_fn.multi_process:\n self.sample_fn._abort_watchdog() # pylint: disable=W0212\n if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:\n self._abort_watchdog()\n for child in self.children:\n child.notify_watchdog()\n\n @staticmethod\n def _get_operator_id(dataset):\n \"\"\"\n Internal method to iterate the tree and obtain op_id of each operator.\n\n Returns:\n Dataset, the root dataset of the tree.\n \"\"\"\n op_name = dict()\n generator_process = dict()\n op_name[str(dataset)] = 0\n op_id = 1\n\n def process_name(datasets, operator_id):\n if not datasets:\n return 0\n temp = []\n for item in datasets:\n for d in item.children:\n temp.append(d)\n op_name[str(d)] = operator_id\n if isinstance(d, GeneratorDataset) and d.sample_fn and d.sample_fn.pids:\n generator_process[operator_id] = [d.num_parallel_workers, set(d.sample_fn.pids)]\n\n operator_id = operator_id + 1\n return process_name(temp, operator_id)\n\n process_name([dataset], op_id)\n if generator_process:\n global _OP_PROCESS\n _OP_PROCESS.update(generator_process)\n return op_name\n\n def parse_tree(self):\n \"\"\"\n Internal method to parse the API tree into an IR tree.\n\n Returns:\n DatasetNode, the root node of the IR tree.\n \"\"\"\n if len(self.parent) > 1:\n raise ValueError(\"The data pipeline is not a tree (i.e., one node has 2 consumers)\")\n ir_children = [d.parse_tree() for d in self.children]\n # Bootstrap can only be performed on a copy of the original dataset node.\n # Bootstrap on original dataset node will make all iterators share the same process pool\n self.iterator_bootstrap()\n ir_node = self.parse(ir_children)\n ir_node = self.post_parse(ir_node)\n return ir_node\n\n def __safe_deepcopy__(self, memodict, exclude=()):\n if id(self) in memodict:\n return memodict[id(self)]\n cls = self.__class__\n new_op = cls.__new__(cls)\n memodict[id(self)] = new_op\n for arg, value in self.__dict__.items():\n if arg in exclude:\n setattr(new_op, arg, value)\n else:\n try:\n setattr(new_op, arg, copy.deepcopy(value, memodict))\n except TypeError:\n setattr(new_op, arg, value)\n return new_op\n\n def iterator_bootstrap(self):\n pass\n\n @staticmethod\n def _noop_mode():\n if _is_role_sched() or _is_role_pserver():\n return True\n return False\n\n def __add__(self, datasets):\n return self.concat(datasets)\n\n def to_json(self, filename=\"\"):\n \"\"\"\n Serialize a pipeline into JSON string and dump into file if filename is provided.\n\n Args:\n filename (str): filename of JSON file to be saved as.\n\n Returns:\n str, JSON string of the pipeline.\n \"\"\"\n ir_tree, _ = self.create_ir_tree()\n return json.loads(ir_tree.to_json(filename))\n\n @check_bucket_batch_by_length\n def bucket_batch_by_length(self, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function=None,\n pad_info=None, pad_to_bucket_boundary=False, drop_remainder=False):\n \"\"\"\n Bucket elements according to their lengths. Each bucket will be padded and batched when\n they are full.\n\n A length function is called on each row in the dataset. 
The row is then\n bucketed based on its length and bucket boundaries. When a bucket reaches its\n corresponding size specified in bucket_batch_sizes, the entire bucket will be\n padded according to batch_info, and then form a batch.\n Each batch will be full, except one special case: the last batch for each bucket may not be full.\n\n Args:\n column_names (list[str]): Columns passed to element_length_function.\n bucket_boundaries (list[int]): A list consisting of the upper boundaries\n of the buckets. Must be strictly increasing. If there are n boundaries,\n n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one\n bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each\n 0<i<n-1, and last bucket for [bucket_boundaries[n-1], inf).\n bucket_batch_sizes (list[int]): A list consisting of the batch sizes for\n each bucket. Must contain len(bucket_boundaries)+1 elements.\n element_length_function (Callable, optional): A function that takes in\n M arguments where M = len(column_names) and returns an integer. If no value\n provided, parameter M the len(column_names) must be 1, and the size of the first\n dimension of that column will be taken as the length (default=None).\n pad_info (dict, optional): The information about how to batch each column. The key\n corresponds to the column name, and the value must be a tuple of 2 elements.\n The first element corresponds to the shape to pad to, and the second\n element corresponds to the value to pad with. If a column is not\n specified, then that column will be padded to the longest in the current\n batch, and 0 will be used as the padding value. Any None dimensions will\n be padded to the longest in the current batch, unless if\n pad_to_bucket_boundary is True. If no padding is wanted, set pad_info\n to None (default=None).\n pad_to_bucket_boundary (bool, optional): If True, will pad each None\n dimension in pad_info to the bucket_boundary minus 1. If there are any\n elements that fall into the last bucket, an error will occur\n (default=False).\n drop_remainder (bool, optional): If True, will drop the last batch for each\n bucket if it is not a full batch (default=False).\n\n Returns:\n BucketBatchByLengthDataset, dataset bucketed and batched by length.\n\n Examples:\n >>> # Create a dataset where certain counts rows are combined into a batch\n >>> # and drops the last incomplete batch if there is one.\n >>> import numpy as np\n >>> def generate_2_columns(n):\n ... for i in range(n):\n ... yield (np.array([i]), np.array([j for j in range(i + 1)]))\n >>>\n >>> column_names = [\"col1\", \"col2\"]\n >>> dataset = ds.GeneratorDataset(generate_2_columns(8), column_names)\n >>> bucket_boundaries = [5, 10]\n >>> bucket_batch_sizes = [2, 1, 1]\n >>> element_length_function = (lambda col1, col2: max(len(col1), len(col2)))\n >>> # Will pad col2 to shape [bucket_boundaries[i]] where i is the\n >>> # index of the bucket that is currently being batched.\n >>> pad_info = {\"col2\": ([None], -1)}\n >>> pad_to_bucket_boundary = True\n >>> dataset = dataset.bucket_batch_by_length(column_names, bucket_boundaries,\n ... bucket_batch_sizes,\n ... element_length_function, pad_info,\n ... 
pad_to_bucket_boundary)\n \"\"\"\n return BucketBatchByLengthDataset(self, column_names, bucket_boundaries, bucket_batch_sizes,\n element_length_function, pad_info, pad_to_bucket_boundary, drop_remainder)\n\n @check_batch\n def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,\n input_columns=None, output_columns=None, column_order=None, pad_info=None,\n python_multiprocessing=False, max_rowsize=16):\n \"\"\"\n Combine batch_size number of consecutive rows into batches.\n\n For any child node, a batch is treated as a single row.\n For any column, all the elements within that column must have the same shape.\n If a per_batch_map callable is provided, it will be applied to the batches of tensors.\n\n Note:\n The order of using repeat and batch reflects the number of batches and per_batch_map.\n It is recommended that the repeat operation applied after the batch operation finished.\n\n Args:\n batch_size (int or function): The number of rows each batch is created with. An\n int or callable object which takes exactly 1 parameter, BatchInfo.\n drop_remainder (bool, optional): Determines whether or not to drop the last block\n whose data row number is less than batch size (default=False). If True, and if there are less\n than batch_size rows available to make the last batch, then those rows will\n be dropped and not propagated to the child node.\n num_parallel_workers (int, optional): Number of workers(threads) to process the dataset in parallel\n (default=None).\n per_batch_map (callable, optional): Per batch map callable. A callable which takes\n (list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch\n of Tensors on a given column. The number of lists should match with number of entries in input_columns.\n The last parameter of the callable should always be a BatchInfo object. Per_batch_map should return\n (list[Tensor], list[Tensor], ...). The length of each list in output should be same as the input.\n output_columns is required if the number of output lists is different from input.\n input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list\n should match with signature of per_batch_map callable (default=None).\n output_columns (Union[str, list[str]], optional): List of names assigned to the columns\n outputted by the last operation. This parameter is mandatory if len(input_columns) !=\n len(output_columns). The size of this list must match the number of output\n columns of the last operation. (default=None, output columns will have the same\n name as the input columns, i.e., the columns will be replaced).\n column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole\n dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here\n is not just the columns specified in parameter input_columns and output_columns.\n pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={\"col1\":([224,224],0)}\n would pad column with name \"col1\" to a tensor of size [224,224] and fill the missing with 0\n (default=None).\n python_multiprocessing (bool, optional): Parallelize Python function per_batch_map with multi-processing.\n This option could be beneficial if the function is computational heavy (default=False).\n max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy\n data between processes. 
This is only used if python_multiprocessing is set to True (default=16).\n\n Returns:\n BatchDataset, dataset batched.\n\n Examples:\n >>> # Create a dataset where every 100 rows are combined into a batch\n >>> # and drops the last incomplete batch if there is one.\n >>> dataset = dataset.batch(100, True)\n >>> # resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)\n >>> def np_resize(col, batchInfo):\n ... output = col.copy()\n ... s = (batchInfo.get_batch_num() + 1) ** 2\n ... index = 0\n ... for c in col:\n ... img = Image.fromarray(c.astype('uint8')).convert('RGB')\n ... img = img.resize((s, s), Image.ANTIALIAS)\n ... output[index] = np.array(img)\n ... index += 1\n ... return (output,)\n >>> dataset = dataset.batch(batch_size=8, input_columns=[\"image\"], per_batch_map=np_resize)\n \"\"\"\n return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,\n output_columns, column_order, pad_info, python_multiprocessing, max_rowsize)\n\n @check_sync_wait\n def sync_wait(self, condition_name, num_batch=1, callback=None):\n \"\"\"\n Add a blocking condition to the input Dataset. A synchronize action will be applied.\n\n Args:\n condition_name (str): The condition name that is used to toggle sending next row.\n num_batch (int): the number of batches without blocking at the start of each epoch.\n callback (function): The callback function that will be invoked when sync_update is called.\n\n Returns:\n SyncWaitDataset, dataset added a blocking condition.\n\n Raises:\n RuntimeError: If condition name already exists.\n\n Examples:\n >>> import numpy as np\n >>> def gen():\n ... for i in range(100):\n ... yield (np.array(i),)\n >>>\n >>> class Augment:\n ... def __init__(self, loss):\n ... self.loss = loss\n ...\n ... def preprocess(self, input_):\n ... return input_\n ...\n ... def update(self, data):\n ... self.loss = data[\"loss\"]\n >>>\n >>> batch_size = 4\n >>> dataset = ds.GeneratorDataset(gen, column_names=[\"input\"])\n >>>\n >>> aug = Augment(0)\n >>> dataset = dataset.sync_wait(condition_name=\"policy\", callback=aug.update)\n >>> dataset = dataset.map(operations=[aug.preprocess], input_columns=[\"input\"])\n >>> dataset = dataset.batch(batch_size)\n >>> count = 0\n >>> for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n ... assert data[\"input\"][0] == count\n ... count += batch_size\n ... data = {\"loss\": count}\n ... dataset.sync_update(condition_name=\"policy\", data=data)\n \"\"\"\n return SyncWaitDataset(self, condition_name, num_batch, callback)\n\n @check_shuffle\n def shuffle(self, buffer_size):\n \"\"\"\n Randomly shuffles the rows of this dataset using the following policy:\n\n 1. Make a shuffle buffer that contains the first buffer_size rows.\n 2. Randomly select an element from the shuffle buffer to be the next row\n propagated to the child node.\n 3. Get the next row (if any) from the parent node and put it in the shuffle buffer.\n 4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.\n\n A random seed can be provided to be used on the first epoch. In every subsequent\n epoch, the seed is changed to a new one, randomly generated value.\n\n Args:\n buffer_size (int): The size of the buffer (must be larger than 1) for\n shuffling. 
Setting buffer_size equal to the number of rows in the entire\n dataset will result in a global shuffle.\n\n Returns:\n ShuffleDataset, dataset shuffled.\n\n Raises:\n RuntimeError: If exist sync operators before shuffle.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # Optionally set the seed for the first epoch\n >>> ds.config.set_seed(58)\n >>> # Create a shuffled dataset using a shuffle buffer of size 4\n >>> dataset = dataset.shuffle(4)\n \"\"\"\n return ShuffleDataset(self, buffer_size)\n\n def flat_map(self, func):\n \"\"\"\n Map `func` to each row in dataset and flatten the result.\n\n The specified `func` is a function that must take one 'Ndarray' as input\n and return a 'Dataset'.\n\n Args:\n func (function): A function that must take one 'Ndarray' as an argument and\n return a 'Dataset'.\n\n Returns:\n Dataset, dataset applied by the function.\n\n Examples:\n >>> # use NumpySlicesDataset as an example\n >>> dataset = ds.NumpySlicesDataset([[0, 1], [2, 3]])\n >>>\n >>> def flat_map_func(array):\n ... # create a NumpySlicesDataset with the array\n ... dataset = ds.NumpySlicesDataset(array)\n ... # repeat the dataset twice\n ... dataset = dataset.repeat(2)\n ... return dataset\n >>>\n >>> dataset = dataset.flat_map(flat_map_func)\n >>> # [[0, 1], [0, 1], [2, 3], [2, 3]]\n\n Raises:\n TypeError: If `func` is not a function.\n TypeError: If `func` doesn't return a Dataset.\n \"\"\"\n dataset = None\n if not hasattr(func, '__call__'):\n logger.error(\"func must be a function.\")\n raise TypeError(\"func must be a function.\")\n\n for row_data in self.create_tuple_iterator(output_numpy=True):\n if dataset is None:\n dataset = func(row_data)\n else:\n dataset += func(row_data)\n\n if not isinstance(dataset, Dataset):\n logger.error(\"flat_map must return a Dataset object.\")\n raise TypeError(\"flat_map must return a Dataset object.\")\n return dataset\n\n @check_map\n def map(self, operations, input_columns=None, output_columns=None, column_order=None,\n num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16):\n \"\"\"\n Apply each operation in operations to this dataset.\n\n The order of operations is determined by the position of each operation in the operations parameter.\n operations[0] will be applied first, then operations[1], then operations[2], etc.\n\n Each operation will be passed one or more columns from the dataset as input, and zero or\n more columns will be outputted. The first operation will be passed the columns specified\n in input_columns as input. If there is more than one operator in operations, the outputted\n columns of the previous operation are used as the input columns for the next operation.\n The columns outputted by the very last operation will be assigned names specified by\n output_columns.\n\n Only the columns specified in column_order will be propagated to the child node. These\n columns will be in the same order as specified in column_order.\n\n Args:\n operations (Union[list[TensorOp], list[functions]]): List of operations to be\n applied on the dataset. Operations are applied in the order they appear in this list.\n input_columns (Union[str, list[str]], optional): List of the names of the columns that will be passed to\n the first operation as input. The size of this list must match the number of\n input columns expected by the first operator. 
(default=None, the first\n operation will be passed however many columns that are required, starting from\n the first column).\n output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by\n the last operation. This parameter is mandatory if len(input_columns) !=\n len(output_columns). The size of this list must match the number of output\n columns of the last operation. (default=None, output columns will have the same\n name as the input columns, i.e., the columns will be replaced).\n column_order (list[str], optional): Specifies the list of all the columns you need in the whole\n dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here\n is not just the columns specified in parameter input_columns and output_columns.\n num_parallel_workers (int, optional): Number of threads used to process the dataset in\n parallel (default=None, the value from the configuration will be used).\n python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This\n option could be beneficial if the Python operation is computational heavy (default=False).\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).\n max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy\n data between processes. This is only used if python_multiprocessing is set to True (default=16).\n\n\n Returns:\n MapDataset, dataset after mapping operation.\n\n Examples:\n >>> # dataset is an instance of Dataset which has 2 columns, \"image\" and \"label\".\n >>>\n >>> # Define two operations, where each operation accepts 1 input column and outputs 1 column.\n >>> decode_op = c_vision.Decode(rgb=True)\n >>> random_jitter_op = c_vision.RandomColorAdjust(brightness=(0.8, 0.8), contrast=(1, 1),\n ... saturation=(1, 1), hue=(0, 0))\n >>>\n >>> # 1) Simple map example.\n >>>\n >>> # Apply decode_op on column \"image\". This column will be replaced by the outputted\n >>> # column of decode_op. Since column_order is not provided, both columns \"image\"\n >>> # and \"label\" will be propagated to the child node in their original order.\n >>> dataset = dataset.map(operations=[decode_op], input_columns=[\"image\"])\n >>>\n >>> # Decode and rename column \"image\" to \"decoded_image\".\n >>> dataset = dataset.map(operations=[decode_op], input_columns=[\"image\"], output_columns=[\"decoded_image\"])\n >>>\n >>> # Specify the order of the output columns.\n >>> dataset = dataset.map(operations=[decode_op], input_columns=[\"image\"],\n ... output_columns=None, column_order=[\"label\", \"image\"])\n >>>\n >>> # Rename column \"image\" to \"decoded_image\" and also specify the order of the output columns.\n >>> dataset = dataset.map(operations=[decode_op], input_columns=[\"image\"],\n ... output_columns=[\"decoded_image\"], column_order=[\"label\", \"decoded_image\"])\n >>>\n >>> # Rename column \"image\" to \"decoded_image\" and keep only this column.\n >>> dataset = dataset.map(operations=[decode_op], input_columns=[\"image\"],\n ... output_columns=[\"decoded_image\"], column_order=[\"decoded_image\"])\n >>>\n >>> # A simple example for mapping pyfunc. 
Renaming columns and specifying column order\n >>> # work in the same way as the previous examples.\n >>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=[\"data\"])\n >>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=[\"data\"])\n >>>\n >>> # 2) Map example with more than one operation.\n >>>\n >>> # Create a dataset where the images are decoded, then randomly color jittered.\n >>> # decode_op takes column \"image\" as input and outputs one column. The column\n >>> # outputted by decode_op is passed as input to random_jitter_op.\n >>> # random_jitter_op will output one column. Column \"image\" will be replaced by\n >>> # the column outputted by random_jitter_op (the very last operation). All other\n >>> # columns are unchanged. Since column_order is not specified, the order of the\n >>> # columns will remain the same.\n >>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=[\"image\"])\n >>>\n >>> # Rename the column outputted by random_jitter_op to \"image_mapped\".\n >>> # Specifying column order works in the same way as examples in 1).\n >>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=[\"image\"],\n ... output_columns=[\"image_mapped\"])\n >>>\n >>> # Map with multiple operations using pyfunc. Renaming columns and specifying column order\n >>> # work in the same way as examples in 1).\n >>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=[\"data\"])\n >>> dataset = dataset.map(operations=[(lambda x: x * x), (lambda x: x - 1)], input_columns=[\"data\"],\n ... output_columns=[\"data_mapped\"])\n >>>\n >>> # 3) Example where number of input columns is not equal to number of output columns.\n >>>\n >>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.\n >>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.\n >>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.\n >>> #\n >>> # Note: The number of output columns of operation[i] must equal the number of\n >>> # input columns of operation[i+1]. Otherwise, this map call will also result\n >>> # in an error.\n >>> operations = [(lambda x, y: (x, x + y, x + y + 1)),\n ... (lambda x, y, z: x * y * z),\n ... (lambda x: (x % 2, x % 3, x % 5, x % 7))]\n >>>\n >>> # Note: Since the number of input columns is not the same as the number of\n >>> # output columns, the output_columns and column_order parameters must be\n >>> # specified. Otherwise, this map call will also result in an error.\n >>>\n >>> dataset = ds.NumpySlicesDataset(data=([[0, 1, 2]], [[3, 4, 5]]), column_names=[\"x\", \"y\"])\n >>>\n >>> # Propagate all columns to the child node in this order:\n >>> dataset = dataset.map(operations, input_columns=[\"x\", \"y\"],\n ... output_columns=[\"mod2\", \"mod3\", \"mod5\", \"mod7\"],\n ... column_order=[\"mod2\", \"mod3\", \"mod5\", \"mod7\"])\n >>>\n >>> # Propagate some columns to the child node in this order:\n >>> dataset = dataset.map(operations, input_columns=[\"x\", \"y\"],\n ... output_columns=[\"mod2\", \"mod3\", \"mod5\", \"mod7\"],\n ... 
column_order=[\"mod7\", \"mod3\", \"col2\"])\n \"\"\"\n\n return MapDataset(self, operations, input_columns, output_columns, column_order, num_parallel_workers,\n python_multiprocessing, cache, callbacks, max_rowsize)\n\n @check_filter\n def filter(self, predicate, input_columns=None, num_parallel_workers=None):\n \"\"\"\n Filter dataset by prediction.\n\n Note:\n If input_columns not provided or provided with empty, all columns will be used.\n\n Args:\n predicate (callable): Python callable which returns a boolean value. If False then filter the element.\n input_columns (Union[str, list[str]], optional): List of names of the input columns, when\n default=None, the predicate will be applied on all columns in the dataset.\n num_parallel_workers (int, optional): Number of workers to process the dataset\n in parallel (default=None).\n\n Returns:\n FilterDataset, dataset filtered.\n\n Examples:\n >>> # generator data(0 ~ 63)\n >>> # filter the data that greater than or equal to 11\n >>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns = [\"data\"])\n \"\"\"\n return FilterDataset(self, predicate, input_columns, num_parallel_workers)\n\n @check_repeat\n def repeat(self, count=None):\n \"\"\"\n Repeat this dataset `count` times. Repeat infinitely if the count is None or -1.\n\n Note:\n The order of using repeat and batch reflects the number of batches. It is recommended that\n the repeat operation is used after the batch operation.\n\n Args:\n count (int): Number of times the dataset is going to be repeated (default=None).\n\n Returns:\n RepeatDataset, dataset repeated.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>>\n >>> # Create a dataset where the dataset is repeated for 50 epochs\n >>> dataset = dataset.repeat(50)\n >>>\n >>> # Create a dataset where each epoch is shuffled individually\n >>> dataset = dataset.shuffle(10)\n >>> dataset = dataset.repeat(50)\n >>>\n >>> # Create a dataset where the dataset is first repeated for\n >>> # 50 epochs before shuffling. The shuffle operator will treat\n >>> # the entire 50 epochs as one big dataset.\n >>> dataset = dataset.repeat(50)\n >>> dataset = dataset.shuffle(10)\n \"\"\"\n return RepeatDataset(self, count)\n\n @check_skip\n def skip(self, count):\n \"\"\"\n Skip the first N elements of this dataset.\n\n Args:\n count (int): Number of elements in the dataset to be skipped.\n\n Returns:\n SkipDataset, dataset that containing rows like origin rows subtract skipped rows.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # Create a dataset which skips first 3 elements from data\n >>> dataset = dataset.skip(3)\n \"\"\"\n return SkipDataset(self, count)\n\n @check_take\n def take(self, count=-1):\n \"\"\"\n Takes at most given numbers of elements from the dataset.\n\n Note:\n 1. If count is greater than the number of elements in the dataset or equal to -1,\n all the elements in dataset will be taken.\n 2. The order of using take and batch matters. 
If take is before batch operation,\n then take given number of rows; otherwise take given number of batches.\n\n Args:\n count (int, optional): Number of elements to be taken from the dataset (default=-1).\n\n Returns:\n TakeDataset, dataset taken.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # Create a dataset where the dataset includes 50 elements.\n >>> dataset = dataset.take(50)\n \"\"\"\n return TakeDataset(self, count)\n\n def _get_absolute_split_sizes(self, sizes):\n \"\"\"\n Internal method called by split to calculate absolute split sizes and to\n do some error checking after calculating absolute split sizes.\n\n Returns:\n int, absolute split sizes of the dataset.\n \"\"\"\n # Call get_dataset_size here and check input here because\n # don't want to call this once in check_split and another time in\n # here again\n dataset_size = self.get_dataset_size()\n\n if dataset_size is None or dataset_size <= 0:\n raise RuntimeError(\"dataset_size is unknown, unable to split.\")\n\n if not isinstance(sizes, list):\n raise RuntimeError(\"sizes must be a list.\")\n\n all_int = all(isinstance(item, int) for item in sizes)\n if all_int:\n sizes_sum = sum(sizes)\n if sizes_sum != dataset_size:\n raise RuntimeError(\"Sum of split sizes {} is not equal to dataset size {}.\"\n .format(sizes_sum, dataset_size))\n return sizes\n\n absolute_sizes = []\n for item in sizes:\n absolute_size = int(round(item * dataset_size))\n if absolute_size == 0:\n raise RuntimeError(\"Split percentage {} is too small.\".format(item))\n absolute_sizes.append(absolute_size)\n\n absolute_sizes_sum = sum(absolute_sizes)\n\n # if we still need more rows, give them to the first split.\n # if we have too many rows, remove the extras from the first split that has\n # enough rows.\n size_difference = int(dataset_size - absolute_sizes_sum)\n if size_difference > 0:\n absolute_sizes[0] += size_difference\n else:\n for i, _ in enumerate(absolute_sizes):\n if absolute_sizes[i] + size_difference > 0:\n absolute_sizes[i] += size_difference\n break\n\n if sum(absolute_sizes) != dataset_size:\n raise RuntimeError(\"Sum of calculated split sizes {} is not equal to dataset size {}.\"\n .format(absolute_sizes_sum, dataset_size))\n\n return absolute_sizes\n\n @check_split\n def split(self, sizes, randomize=True):\n \"\"\"\n Split the dataset into smaller, non-overlapping datasets.\n\n This is a general purpose split function which can be called from any operator in the pipeline.\n There is another, optimized split function, which will be called automatically if ds.split is\n called where ds is a MappableDataset.\n\n Args:\n sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is\n provided, the dataset will be split into n datasets of size s1, size s2, …, size sn\n respectively. If the sum of all input sizes does not equal the original dataset size, an\n error will throw.\n If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1\n and must sum to 1, otherwise an error will throw. 
The dataset will be split into n\n Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the\n original dataset.\n If after rounding:\n\n - Any size equals 0, an error will occur.\n - The sum of split sizes < K, the difference of K - sigma(round(fi * k)) will be added to the first\n split.\n - The sum of split sizes > K, the difference of sigma(round(fi * K)) - K will be removed from the first\n large enough split such that it will have at least 1 row after removing the difference.\n\n randomize (bool, optional): Determines whether or not to split the data randomly (default=True).\n If True, the data will be randomly split. Otherwise, each split will be created with\n consecutive rows from the dataset.\n\n Note:\n 1. Dataset cannot be sharded if split is going to be called.\n 2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.\n Shuffling the dataset may not be deterministic, which means the data in each split\n will be different in each epoch.\n\n Raises:\n RuntimeError: If get_dataset_size returns None or is not supported for this dataset.\n RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not\n equal the dataset size.\n RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.\n RuntimeError: If the dataset is sharded prior to calling split.\n ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the\n floats don't sum to 1.\n\n Returns:\n tuple(Dataset), a tuple of datasets that have been split.\n\n Examples:\n >>> # TextFileDataset is not a mappable dataset, so this non-optimized split will be called.\n >>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!\n >>> dataset = ds.TextFileDataset(text_file_dataset_dir, shuffle=False)\n >>> train_dataset, test_dataset = dataset.split([0.9, 0.1])\n \"\"\"\n if self.is_shuffled():\n logger.warning(\"Dataset is shuffled before split.\")\n\n if self.is_sharded():\n raise RuntimeError(\"Dataset should not be sharded before split.\")\n\n absolute_sizes = self._get_absolute_split_sizes(sizes)\n splits = []\n rows_to_skip = 0\n for size in absolute_sizes:\n ds = copy.deepcopy(self)\n if randomize:\n # want to shuffle the same way every epoch before split\n # in alter_tree, shuffle buffer is minimum 10000, so use 10000 here\n ds = ds.shuffle(10000)\n ds.reshuffle_each_epoch = False\n\n if rows_to_skip > 0:\n ds = ds.skip(rows_to_skip)\n\n ds = ds.take(size)\n splits.append(ds)\n\n rows_to_skip += size\n\n return tuple(splits)\n\n @check_zip_dataset\n def zip(self, datasets):\n \"\"\"\n Zip the datasets in the sense of input tuple of datasets. 
Columns in the input datasets must have different\n name.\n\n Args:\n datasets (Union[tuple, class Dataset]): A tuple of datasets or a single class Dataset\n to be zipped together with this dataset.\n\n Returns:\n ZipDataset, dataset zipped.\n\n Examples:\n >>> # Create a dataset which is the combination of dataset and dataset_1\n >>> dataset = dataset.zip(dataset_1)\n \"\"\"\n if isinstance(datasets, tuple):\n datasets = (self, *datasets)\n elif isinstance(datasets, Dataset):\n datasets = (self, datasets)\n else:\n raise TypeError(\"Invalid datasets, expected Dataset object or tuple of Dataset, but got %s!\" % datasets)\n return ZipDataset(datasets)\n\n @check_concat\n def concat(self, datasets):\n \"\"\"\n Concatenate the dataset objects in the input list.\n Performing \"+\" operation on dataset objects can achieve the same effect.\n\n Note:\n The column name, and rank and type of the column data must be the same in the input datasets.\n\n Args:\n datasets (Union[list, class Dataset]): A list of datasets or a single class Dataset\n to be concatenated together with this dataset.\n\n Returns:\n ConcatDataset, dataset concatenated.\n\n Examples:\n >>> # Create a dataset by concatenating dataset_1 and dataset_2 with \"+\" operator\n >>> dataset = dataset_1 + dataset_2\n >>> # Create a dataset by concatenating dataset_1 and dataset_2 with concat operation\n >>> dataset = dataset_1.concat(dataset_2)\n \"\"\"\n if isinstance(datasets, Dataset):\n datasets = [self] + [datasets]\n elif isinstance(datasets, list):\n datasets = [self] + datasets\n else:\n raise TypeError(\"Invalid datasets, expected Dataset object or list of Dataset, but got %s!\" % datasets)\n return ConcatDataset(datasets)\n\n @check_rename\n def rename(self, input_columns, output_columns):\n \"\"\"\n Rename the columns in input datasets.\n\n Args:\n input_columns (Union[str, list[str]]): List of names of the input columns.\n output_columns (Union[str, list[str]]): List of names of the output columns.\n\n Returns:\n RenameDataset, dataset renamed.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> input_columns = [\"input_col1\", \"input_col2\", \"input_col3\"]\n >>> output_columns = [\"output_col1\", \"output_col2\", \"output_col3\"]\n >>>\n >>> # Create a dataset where input_col1 is renamed to output_col1, and\n >>> # input_col2 is renamed to output_col2, and input_col3 is renamed\n >>> # to output_col3.\n >>> dataset = dataset.rename(input_columns=input_columns, output_columns=output_columns)\n \"\"\"\n\n return RenameDataset(self, input_columns, output_columns)\n\n @check_project\n def project(self, columns):\n \"\"\"\n Project certain columns in input dataset.\n\n The specified columns will be selected from the dataset and passed into\n the pipeline with the order specified. The other columns are discarded.\n\n Args:\n columns(Union[str, list[str]]): List of names of the columns to project.\n\n Returns:\n ProjectDataset, dataset projected.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> columns_to_project = [\"column3\", \"column1\", \"column2\"]\n >>>\n >>> # Create a dataset that consists of column3, column1, column2\n >>> # in that order, regardless of the original order of columns.\n >>> dataset = dataset.project(columns=columns_to_project)\n \"\"\"\n\n return ProjectDataset(self, columns)\n\n def build_vocab(self, columns, freq_range, top_k, special_tokens, special_first):\n \"\"\"\n Function to create a Vocab from source dataset\n\n Build a vocab from a dataset. 
This would collect all the unique words in a dataset and return a vocab\n which contains top_k most frequent words (if top_k is specified)\n\n Args:\n\n columns(Union[str, list[str]]): Column names to get words from.\n freq_range(tuple[int]): A tuple of integers (min_frequency, max_frequency). Words within the frequency\n range will be stored.\n Naturally 0 <= min_frequency <= max_frequency <= total_words. min_frequency/max_frequency\n can be set to default, which corresponds to 0/total_words separately.\n top_k(int): Number of words to be built into vocab. top_k most frequent words are\n taken. The top_k is taken after freq_range. If not enough top_k, all words will be taken\n special_tokens(list[str]): A list of strings, each one is a special token.\n special_first(bool): Whether special_tokens will be prepended/appended to vocab, If special_tokens\n is specified and special_first is set to default, special_tokens will be prepended.\n\n Returns:\n Vocab, vocab built from the dataset.\n\n Examples:\n >>> import numpy as np\n >>>\n >>> def gen_corpus():\n ... # key: word, value: number of occurrences, reason for using letters is so their order is apparent\n ... corpus = {\"Z\": 4, \"Y\": 4, \"X\": 4, \"W\": 3, \"U\": 3, \"V\": 2, \"T\": 1}\n ... for k, v in corpus.items():\n ... yield (np.array([k] * v, dtype='S'),)\n >>> column_names = [\"column1\"]\n >>> dataset = ds.GeneratorDataset(gen_corpus, column_names)\n >>> dataset = dataset.build_vocab(columns=[\"column1\"],\n ... freq_range=(1, 10), top_k=5,\n ... special_tokens=[\"<pad>\", \"<unk>\"],\n ... special_first=True)\n\n \"\"\"\n vocab = cde.Vocab()\n columns = replace_none(columns, [])\n if not isinstance(columns, list):\n columns = [columns]\n\n freq_range = replace_none(freq_range, (0, 9223372036854775807))\n if freq_range[0] is None:\n freq_range = (0, freq_range[1])\n if freq_range[1] is None:\n freq_range = (freq_range[0], 9223372036854775807)\n special_tokens = replace_none(special_tokens, [])\n top_k = replace_none(top_k, 9223372036854775807)\n\n ir_tree, api_tree = self.create_ir_tree()\n\n # vocab node\n vocab_node = cde.BuildVocabNode(ir_tree, vocab, columns, freq_range, top_k, special_tokens, special_first)\n\n runtime_context = cde.PythonRuntimeContext()\n runtime_context.Init()\n\n # build vocab\n consumer = cde.PythonBuildVocabConsumer()\n consumer.Init(vocab_node)\n runtime_context.AssignConsumer(consumer)\n\n consumer.Start()\n del api_tree\n\n return vocab\n\n def build_sentencepiece_vocab(self, columns, vocab_size, character_coverage, model_type, params):\n \"\"\"\n Function to create a SentencePieceVocab from source dataset\n\n Build a SentencePieceVocab from a dataset.\n\n Args:\n\n columns(list[str]): Column names to get words from.\n vocab_size(int): Vocabulary size.\n character_coverage(int): Percentage of characters covered by the model, must be between\n 0.98 and 1.0 Good defaults are: 0.9995 for languages with rich character sets like\n Japanese or Chinese character sets, and 1.0 for other languages with small character sets\n like English or Latin.\n model_type(SentencePieceModel): Model type. Choose from unigram (default), bpe, char, or word.\n The input sentence must be pretokenized when using word type.\n params(dict): Any extra optional parameters of sentencepiece library according to your raw data\n\n Returns:\n SentencePieceVocab, vocab built from the dataset.\n\n Examples:\n >>> from mindspore.dataset.text import SentencePieceModel\n >>>\n >>> def gen_corpus():\n ... 
# key: word, value: number of occurrences, reason for using letters is so their order is apparent\n ... corpus = {\"Z\": 4, \"Y\": 4, \"X\": 4, \"W\": 3, \"U\": 3, \"V\": 2, \"T\": 1}\n ... for k, v in corpus.items():\n ... yield (np.array([k] * v, dtype='S'),)\n >>> column_names = [\"column1\",\"column2\",\"column3\"]\n >>> dataset = ds.GeneratorDataset(gen_corpus, column_names)\n >>> dataset = dataset.build_sentencepiece_vocab(columns=[\"column3\", \"column1\", \"column2\"],\n ... vocab_size=5000,\n ... character_coverage=0.9995,\n ... model_type=SentencePieceModel.UNIGRAM,\n ... params={})\n \"\"\"\n vocab = cde.SentencePieceVocab()\n\n ir_tree, api_tree = self.create_ir_tree()\n\n # vocab node\n vocab_node = cde.BuildSentenceVocabNode(ir_tree, vocab, columns, vocab_size, character_coverage, model_type,\n params)\n\n runtime_context = cde.PythonRuntimeContext()\n runtime_context.Init()\n\n # build vocab\n consumer = cde.PythonBuildVocabConsumer()\n consumer.Init(vocab_node)\n runtime_context.AssignConsumer(consumer)\n\n consumer.Start()\n del api_tree\n\n return vocab\n\n def apply(self, apply_func):\n \"\"\"\n Apply a function in this dataset.\n\n Args:\n apply_func (function): A function that must take one 'Dataset' as an argument and\n return a preprocessed 'Dataset'.\n\n Returns:\n Dataset, dataset applied by the function.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>>\n >>> # Declare an apply_func function which returns a Dataset object\n >>> def apply_func(data):\n ... data = data.batch(2)\n ... return data\n >>>\n >>> # Use apply to call apply_func\n >>> dataset = dataset.apply(apply_func)\n\n Raises:\n TypeError: If apply_func is not a function.\n TypeError: If apply_func doesn't return a Dataset.\n \"\"\"\n\n if not hasattr(apply_func, '__call__'):\n raise TypeError(\"apply_func must be a function.\")\n\n dataset = apply_func(self)\n if not isinstance(dataset, Dataset):\n raise TypeError(\"apply_func must return a dataset.\")\n return dataset\n\n @check_device_send\n def device_que(self, send_epoch_end=True, create_data_info_queue=False):\n \"\"\"\n Return a transferred Dataset that transfers data through a device.\n\n Args:\n send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).\n create_data_info_queue (bool, optional): Whether to create queue which stores\n types and shapes of data or not(default=False).\n\n Note:\n If device is Ascend, features of data will be transferred one by one. The limitation\n of data transmission per time is 256M.\n\n Returns:\n TransferDataset, dataset for transferring.\n \"\"\"\n return self.to_device(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)\n\n @check_device_send\n def to_device(self, send_epoch_end=True, create_data_info_queue=False):\n \"\"\"\n Transfer data from CPU to GPU or Ascend or other devices.\n\n Args:\n send_epoch_end (bool, optional): Whether to send the end of sequence to device or not (default=True).\n create_data_info_queue (bool, optional): Whether to create queue which stores\n types and shapes of data or not(default=False).\n\n Note:\n If device is Ascend, features of data will be transferred one by one. 
The limitation\n of data transmission per second is 256M.\n\n Returns:\n TransferDataset, dataset for transferring.\n\n Raises:\n RuntimeError: If distribution file path is given but failed to read.\n \"\"\"\n return TransferDataset(self, send_epoch_end, create_data_info_queue)\n\n @check_save\n def save(self, file_name, num_files=1, file_type='mindrecord'):\n \"\"\"\n Save the dynamic data processed by the dataset pipeline in common dataset format.\n Supported dataset formats: 'mindrecord' only\n\n Implicit type casting exists when saving data as 'mindrecord'. The transform table shows how to do type casting.\n\n .. list-table:: Implicit Type Casting when Saving as 'mindrecord'\n :widths: 25 25 50\n :header-rows: 1\n\n * - Type in 'dataset'\n - Type in 'mindrecord'\n - Details\n * - bool\n - None\n - Not supported\n * - int8\n - int32\n -\n * - uint8\n - bytes(1D uint8)\n - Drop dimension\n * - int16\n - int32\n -\n * - uint16\n - int32\n -\n * - int32\n - int32\n -\n * - uint32\n - int64\n -\n * - int64\n - int64\n -\n * - uint64\n - None\n - Not supported\n * - float16\n - float32\n -\n * - float32\n - float32\n -\n * - float64\n - float64\n -\n * - string\n - string\n - Multi-dimensional string not supported\n\n Note:\n 1. To save the samples in order, set dataset's shuffle to False and num_files to 1.\n 2. Before calling the function, do not use batch operator, repeat operator or data augmentation operators\n with random attribute in map operator.\n 3. When array dimension is variable, one-dimensional arrays or\n multi-dimensional arrays with variable dimension 0 are supported.\n 4. Mindrecord does not support DE_UINT64, multi-dimensional DE_UINT8(drop dimension) nor\n multi-dimensional DE_STRING.\n\n Args:\n file_name (str): Path to dataset file.\n num_files (int, optional): Number of dataset files (default=1).\n file_type (str, optional): Dataset format (default='mindrecord').\n\n \"\"\"\n ir_tree, api_tree = self.create_ir_tree()\n\n runtime_context = cde.PythonRuntimeContext()\n runtime_context.Init()\n consumer = cde.PythonSaveToDisk(file_name, num_files, file_type)\n consumer.Init(ir_tree)\n runtime_context.AssignConsumer(consumer)\n\n consumer.Save()\n _set_dataset_permissions(file_name, num_files)\n del api_tree\n\n @check_tuple_iterator\n def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):\n \"\"\"\n Create an iterator over the dataset. The datatype retrieved back will be a list of ndarrays.\n\n To specify which columns to list and the order needed, use columns_list. If columns_list\n is not provided, the order of the columns will remain unchanged.\n\n Args:\n columns (list[str], optional): List of columns to be used to specify the order of columns\n (default=None, means all columns).\n num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.\n (default=-1, iterator can be iterated infinite number of epochs)\n output_numpy (bool, optional): Whether or not to output NumPy datatype.\n If output_numpy=False, iterator will output MSTensor (default=False).\n do_copy (bool, optional): when output data type is mindspore.Tensor,\n use this param to select the conversion method, only take False for better performance (default=True).\n\n Returns:\n TupleIterator, tuple iterator over the dataset.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> iterator = dataset.create_tuple_iterator()\n >>> for item in iterator:\n ... # item is a list\n ... print(type(item))\n ... 
break\n <class 'list'>\n \"\"\"\n if output_numpy is None:\n output_numpy = False\n\n if Dataset._noop_mode():\n return DummyIterator(self, 'tuple')\n return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)\n\n @check_dict_iterator\n def create_dict_iterator(self, num_epochs=-1, output_numpy=False):\n \"\"\"\n Create an iterator over the dataset. The data retrieved will be a dictionary datatype.\n\n The order of the columns in the dictionary may not be the same as the original order.\n\n Args:\n num_epochs (int, optional): Maximum number of epochs that iterator can be iterated\n (default=-1, iterator can be iterated infinite number of epochs).\n output_numpy (bool, optional): Whether or not to output NumPy datatype,\n if output_numpy=False, iterator will output MSTensor (default=False).\n\n Returns:\n DictIterator, dictionary iterator over the dataset.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> iterator = dataset.create_dict_iterator()\n >>> for item in iterator:\n ... # item is a dict\n ... print(type(item))\n ... break\n <class 'dict'>\n \"\"\"\n if output_numpy is None:\n output_numpy = False\n\n if Dataset._noop_mode():\n return DummyIterator(self, 'dict')\n return DictIterator(self, num_epochs, output_numpy)\n\n def __iter__(self):\n \"\"\"Create an iterator over the dataset.\"\"\"\n return self.create_tuple_iterator(num_epochs=1)\n\n @property\n def input_indexs(self):\n \"\"\"\n Get Input Index Information\n\n Returns:\n tuple, tuple of the input index information.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # set input_indexs\n >>> dataset.input_indexs = 10\n >>> print(dataset.input_indexs)\n 10\n \"\"\"\n if self._input_indexs != ():\n return self._input_indexs\n\n # find input_indexes of children\n children_input_index = [child.input_indexs for child in self.children]\n\n # in case of more than one child, return the first input_indexes\n for cix in children_input_index:\n if cix != ():\n return cix\n\n # if all children's input_indexes are () or the node is a leaf\n return self._input_indexs\n\n @input_indexs.setter\n def input_indexs(self, value):\n self._input_indexs = value\n\n def copy_batch_size(self, value):\n self._batch_size = value\n\n def _init_tree_getters(self):\n \"\"\"\n Get pipeline information.\n \"\"\"\n ir_tree, api_tree = self.create_ir_tree()\n\n runtime_context = cde.PythonRuntimeContext()\n runtime_context.Init()\n getter = cde.TreeGetters()\n getter.Init(ir_tree)\n runtime_context.AssignConsumer(getter)\n return getter, runtime_context, api_tree\n\n def __init_size_getter(self):\n \"\"\"\n Get pipeline information.\n \"\"\"\n ir_tree, api_tree = self.create_ir_tree()\n\n runtime_context = cde.PythonRuntimeContext()\n runtime_context.Init()\n getter = cde.DatasetSizeGetters()\n getter.Init(ir_tree)\n runtime_context.AssignConsumer(getter)\n return getter, runtime_context, api_tree\n\n def get_col_names(self):\n \"\"\"\n Return the names of the columns in dataset.\n\n Returns:\n list, list of column names in the dataset.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> col_names = dataset.get_col_names()\n \"\"\"\n if self._col_names is None:\n runtime_getter = self._init_tree_getters()\n self._col_names = runtime_getter[0].GetColumnNames()\n self.close_pool()\n runtime_getter[2].notify_watchdog()\n return self._col_names\n\n def output_shapes(self):\n \"\"\"\n Get the shapes of output data.\n\n Returns:\n list, list of shapes of each column.\n\n Examples:\n >>> # dataset is 
an instance object of Dataset\n >>> output_shapes = dataset.output_shapes()\n \"\"\"\n if self.saved_output_shapes is None:\n runtime_getter = self._init_tree_getters()\n self.saved_output_shapes = runtime_getter[0].GetOutputShapes()\n self.saved_output_types = runtime_getter[0].GetOutputTypes()\n self.close_pool()\n runtime_getter[2].notify_watchdog()\n if self.dynamic_setting[0]:\n self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()\n return self.saved_output_shapes\n\n def output_types(self):\n \"\"\"\n Get the types of output data.\n\n Returns:\n list, list of data types.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> output_types = dataset.output_types()\n \"\"\"\n if self.saved_output_types is None:\n runtime_getter = self._init_tree_getters()\n self.saved_output_shapes = runtime_getter[0].GetOutputShapes()\n self.saved_output_types = runtime_getter[0].GetOutputTypes()\n self.close_pool()\n runtime_getter[2].notify_watchdog()\n if self.dynamic_setting[0]:\n self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()\n return self.saved_output_types\n\n def get_dataset_size(self):\n \"\"\"\n Return the number of batches in an epoch.\n\n Returns:\n int, number of batches.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> dataset_size = dataset.get_dataset_size()\n \"\"\"\n if self.dataset_size is None:\n runtime_getter = self.__init_size_getter()\n self.dataset_size = runtime_getter[0].GetDatasetSize(False)\n self.close_pool()\n runtime_getter[2].notify_watchdog()\n return self.dataset_size\n\n def set_dynamic_columns(self, columns=None):\n \"\"\"\n Set dynamic shape information of source data, it should be set after the pipeline is defined.\n\n Args:\n columns (dict): A dict contains shape information of each column in dataset.\n The value of shape[i] is :py:obj:`None` indicates that the data length of shape[i] is dynamic.\n\n Examples:\n >>> import numpy as np\n >>>\n >>> def generator1():\n >>> for i in range(1, 100):\n >>> yield np.ones((16, i, 83)), np.array(i)\n >>>\n >>> dataset = ds.GeneratorDataset(generator1, [\"data1\", \"data2\"])\n >>> dataset.set_dynamic_columns(columns={\"data1\": [16, None, 83], \"data2\": []})\n \"\"\"\n if not isinstance(columns, dict):\n raise TypeError(\"Pass a dict to set dynamic shape, example: {\\\"data1\\\": [16, None, 256]}\")\n self.dynamic_setting[0] = True\n self.dynamic_setting[1] = columns\n\n def dynamic_min_max_shapes(self):\n \"\"\"\n Get minimum and maximum data length of dynamic source data, for dynamic graph compilation.\n\n Returns:\n lists, min_shapes, max_shapes of source data.\n\n Examples:\n >>> import numpy as np\n >>>\n >>> def generator1():\n >>> for i in range(1, 100):\n >>> yield np.ones((16, i, 83)), np.array(i)\n >>>\n >>> dataset = ds.GeneratorDataset(generator1, [\"data1\", \"data2\"])\n >>> dataset.set_dynamic_columns(columns={\"data1\": [16, None, 83], \"data2\": []})\n >>> min_shapes, max_shapes = dataset.dynamic_min_max_shapes()\n \"\"\"\n if self.saved_min_shapes is None or self.saved_max_shapes is None:\n self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()\n return self.saved_min_shapes, self.saved_max_shapes\n\n def _dynamic_output_shapes(self):\n \"\"\"\n Get dynamic information of source data.\n\n Returns:\n lists, dynamic_shapes, min_shapes, max_shapes of source data.\n \"\"\"\n if not self.dynamic_setting[1]:\n raise 
RuntimeError(\"dynamic_columns is not set, call set_dynamic_columns() by final Dataset Op.\")\n\n if self.saved_output_shapes is not None and self.saved_min_shapes is not None and \\\n self.saved_max_shapes is not None:\n return self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes\n\n logger.warning(\"Calculating dynamic shape of input data, this will take a few minutes...\")\n # Assume data1 shape is dynamic, data2 shape is fix\n # {\"data1\": [batch_size, None, feat_len], \"data2\": [batch_size, feat_len]}\n dynamic_columns = self.dynamic_setting[1]\n # [\"data1\", \"data2\"]\n dataset_columns = self.get_col_names()\n for column in dynamic_columns:\n if column not in dataset_columns:\n raise RuntimeError(\"dynamic column [\" + column + \"] does not match any column in dataset: \" +\n str(dataset_columns))\n\n # Shape[1] of data1 is variable\n # {\"data1\": {(batch_size, 100, feat_len), (16, 200, 83)}, \"data2\": {(batch_size, feat_len)}}\n column_shape_set = {col: set() for col in dataset_columns}\n dataset_size_counter = 0\n for data in self.create_dict_iterator(num_epochs=1, output_numpy=True):\n dataset_size_counter += 1\n for col in data.keys():\n if col in dynamic_columns:\n shape_mismatch = \"dynamic column [\" + col + \"] with shape \" + str(dynamic_columns[col]) + \\\n \" does not match dataset column [\" + col + \"] with shape \" + str(list(data[col].shape))\n if data[col].ndim != len(dynamic_columns[col]):\n raise RuntimeError(shape_mismatch)\n for dim in range(len(dynamic_columns[col])):\n if dynamic_columns[col][dim] is not None and dynamic_columns[col][dim] != data[col].shape[dim]:\n raise RuntimeError(shape_mismatch)\n column_shape_set[col].add(tuple(data[col].shape))\n\n # we get dataset_size after dryrun\n self.dataset_size = dataset_size_counter\n\n min_shapes, max_shapes, dynamic_shapes = list(), list(), list()\n for col, shape_set in column_shape_set.items():\n if len(shape_set) > 1:\n if col not in dynamic_columns:\n raise RuntimeError(\"column [\" + col + \"] has dynamic shape but not set by set_dynamic_columns()\" +\n \", shapes of [\" + col + \"]: \" + str(list(shape_set)))\n shape_npy = np.array(list(shape_set))\n max_shape = shape_npy.max(axis=0)\n min_shape = shape_npy.min(axis=0)\n\n # Set min shape to 1 due to unknown shuffle\n min_shape = np.where(np.equal(dynamic_columns[col], None), 1, min_shape)\n # Set dynamic dim to -1 for ME\n dynamic_shape = np.where(np.equal(dynamic_columns[col], None), -1, dynamic_columns[col])\n\n max_shapes.append(max_shape.tolist())\n min_shapes.append(min_shape.tolist())\n dynamic_shapes.append(dynamic_shape.tolist())\n else:\n # Also append fix shape to keep order of column shape\n fix_shape = list(list(shape_set)[0])\n max_shapes.append(fix_shape)\n min_shapes.append(fix_shape)\n dynamic_shapes.append(fix_shape)\n if col in dynamic_columns:\n logger.warning(\"column [\" + col + \"] has no dynamic shape but set by set_dynamic_columns()\")\n # Set min shape to 1 due to unknown shuffle\n min_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), 1, fix_shape).tolist()\n # Set dynamic dim to -1 for ME\n dynamic_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), -1, fix_shape).tolist()\n return dynamic_shapes, min_shapes, max_shapes\n\n def num_classes(self):\n \"\"\"\n Get the number of classes in a dataset.\n\n Returns:\n int, number of classes.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> num_classes = dataset.num_classes()\n \"\"\"\n if self._num_classes is None:\n 
runtime_getter = self._init_tree_getters()\n self._num_classes = runtime_getter[0].GetNumClasses()\n self.close_pool()\n runtime_getter[2].notify_watchdog()\n if self._num_classes == -1:\n return None\n return self._num_classes\n\n def get_sync_notifiers(self):\n if self.children:\n return self.children[0].get_sync_notifiers()\n return {}\n\n def disable_sync(self):\n if self.children:\n return self.children[0].disable_sync()\n return {}\n\n def is_sync(self):\n if self.children:\n return self.children[0].is_sync()\n return False\n\n def sync_update(self, condition_name, num_batch=None, data=None):\n \"\"\"\n Release a blocking condition and trigger callback with given data.\n\n Args:\n condition_name (str): The condition name that is used to toggle sending next row.\n num_batch (Union[int, None]): The number of batches (rows) that are released.\n When num_batch is None, it will default to the number specified by the\n sync_wait operator (default=None).\n data (Any): The data passed to the callback, user defined (default=None).\n \"\"\"\n if (not isinstance(num_batch, int) and num_batch is not None) or \\\n (isinstance(num_batch, int) and num_batch <= 0):\n # throwing exception, disable all sync_wait in pipeline\n self.disable_sync()\n raise RuntimeError(\"Sync_update batch size can only be positive integer, got : {}.\".format(num_batch))\n notifiers_dict = self.get_sync_notifiers()\n if not isinstance(condition_name, str):\n raise TypeError(\"Argument condition_name with value {} is not of type str, but got {}.\"\n .format(condition_name, type(condition_name)))\n if condition_name not in notifiers_dict:\n # throwing exception, disable all sync_wait in pipeline\n self.disable_sync()\n raise RuntimeError(\"Condition name not found.\")\n if num_batch is not None:\n num_batch *= self.get_batch_size()\n notifiers_dict[condition_name](num_batch, data)\n\n def get_batch_size(self):\n \"\"\"\n Return the size of batch.\n\n Returns:\n int, the number of data in a batch.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> batch_size = dataset.get_batch_size()\n \"\"\"\n if self._batch_size is None:\n runtime_getter = self._init_tree_getters()\n self._batch_size = runtime_getter[0].GetBatchSize()\n if self._batch_size is None:\n self._batch_size = 1\n return self._batch_size\n\n def get_repeat_count(self):\n \"\"\"\n Get the replication times in RepeatDataset (default is 1).\n\n Returns:\n int, the count of repeat.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> repeat_count = dataset.get_repeat_count()\n \"\"\"\n if self._repeat_count is None:\n runtime_getter = self._init_tree_getters()\n self._repeat_count = runtime_getter[0].GetRepeatCount()\n if self._repeat_count is None:\n self._repeat_count = 1\n return self._repeat_count\n\n def get_class_indexing(self):\n \"\"\"\n Return the class index.\n\n Returns:\n dict, a str-to-int mapping from label name to index.\n dict, a str-to-list<int> mapping from label name to index for Coco ONLY. 
The second number\n in the list is used to indicate the super category.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> class_indexing = dataset.get_class_indexing()\n \"\"\"\n if self.children:\n return self.children[0].get_class_indexing()\n return {}\n\n def reset(self):\n \"\"\"Reset the dataset for next epoch.\"\"\"\n\n def is_shuffled(self):\n \"\"\"Returns True if the dataset or its children is shuffled.\"\"\"\n for input_dataset in self.children:\n if input_dataset.is_shuffled():\n return True\n\n return False\n\n def is_sharded(self):\n \"\"\"Returns True if the dataset or its children is sharded.\"\"\"\n for input_dataset in self.children:\n if input_dataset.is_sharded():\n return True\n\n return False\n\n def parse(self, children=None):\n raise NotImplementedError(\"Dataset has to implement parse method.\")\n\n def post_parse(self, ir_node):\n if self.cache:\n ir_node = ir_node.set_cache_client(self.cache.cache_client)\n if self.num_parallel_workers:\n ir_node = ir_node.set_num_workers(self.num_parallel_workers)\n\n return ir_node\n\n\nclass SourceDataset(Dataset):\n \"\"\"\n Abstract class to represent a source dataset which produces content to the data pipeline.\n \"\"\"\n\n def __init__(self, num_parallel_workers=None, num_samples=None, shuffle=True, num_shards=None, shard_id=None,\n cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, cache=cache)\n self.num_samples = replace_none(num_samples, 0)\n self.num_shards = replace_none(num_shards, 1)\n self.shard_id = replace_none(shard_id, 0)\n\n if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):\n raise TypeError(\"shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or \"\n \"'Shuffle.FILES' or 'Shuffle.INFILE'.\")\n\n self.shuffle_flag = 2 # Global shuffle\n if not isinstance(shuffle, Shuffle):\n if shuffle is None or shuffle:\n self.shuffle_flag = 2 # Global shuffle\n else:\n self.shuffle_flag = 0 # No shuffle\n else:\n if shuffle == Shuffle.GLOBAL:\n self.shuffle_flag = 2 # Global shuffle\n elif shuffle == Shuffle.FILES:\n self.shuffle_flag = 1 # Files shuffle\n elif shuffle == Shuffle.INFILE:\n self.shuffle_flag = 3 # Infile shuffle\n\n def parse(self, children=None):\n raise NotImplementedError(\"Dataset has to implement parse method.\")\n\n @staticmethod\n def _find_files(patterns):\n \"\"\"\n Utility function to search for files with the given glob patterns.\n\n Args:\n patterns (Union[str, list[str]]): String or list of patterns to be searched.\n\n Returns:\n list, list of files.\n \"\"\"\n\n if not isinstance(patterns, list):\n patterns = [patterns]\n\n file_list = []\n unmatched_patterns = []\n for pattern in patterns:\n matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]\n\n if matches:\n file_list.extend(matches)\n else:\n unmatched_patterns.append(pattern)\n\n if unmatched_patterns:\n raise ValueError(\"The following patterns did not match any files: {}.\".format(unmatched_patterns))\n\n if file_list: # not empty\n return file_list\n raise ValueError(\"The list of path names matching the patterns is empty.\")\n\n def is_shuffled(self):\n return self.shuffle_flag > 0\n\n def is_sharded(self):\n if self.num_shards is not None:\n return self.num_shards > 1\n return False\n\n\nclass MappableDataset(SourceDataset):\n \"\"\"\n Abstract class to represent a source dataset which supports use of samplers.\n \"\"\"\n\n def parse(self, children=None):\n raise NotImplementedError(\"Dataset has to implement 
parse method.\")\n\n def __init__(self, num_parallel_workers=None, sampler=None, num_samples=None, shuffle=None, num_shards=None,\n shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.shuffle_flag = replace_none(shuffle, True)\n self.sampler = samplers.select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)\n\n def add_sampler(self, new_sampler):\n \"\"\"\n Add a sampler for current dataset,.\n\n Args:\n new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # use a DistributedSampler instead\n >>> new_sampler = ds.DistributedSampler(10, 2)\n >>> dataset.add_sampler(new_sampler)\n \"\"\"\n # note: By adding a sampler, the sampled IDs will flow to new_sampler\n # after first passing through the current samplers attached to this dataset.\n self.dataset_size = None\n new_sampler.add_child(self.sampler)\n self.sampler = new_sampler\n\n def use_sampler(self, new_sampler):\n \"\"\"\n Make the current dataset use the new_sampler provided by other API.\n\n Args:\n new_sampler (Sampler): The sampler to use for the current dataset.\n\n Examples:\n >>> # dataset is an instance object of Dataset\n >>> # use a DistributedSampler instead\n >>> new_sampler = ds.DistributedSampler(10, 2)\n >>> dataset.use_sampler(new_sampler)\n \"\"\"\n if new_sampler is None:\n raise TypeError(\"Input sampler can not be None.\")\n if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):\n raise TypeError(\"Input sampler is not an instance of a sampler.\")\n self.dataset_size = None\n\n self.sampler = self.sampler.child_sampler\n self.add_sampler(new_sampler)\n\n def is_shuffled(self):\n return self.sampler.is_shuffled()\n\n def is_sharded(self):\n return self.sampler.is_sharded()\n\n @check_split\n def split(self, sizes, randomize=True):\n \"\"\"\n Split the dataset into smaller, non-overlapping datasets.\n\n Args:\n sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is\n provided, the dataset will be split into n datasets of size s1, size s2, …, size sn\n respectively. If the sum of all sizes does not equal the original dataset size, an\n error will occur.\n If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1\n and must sum to 1, otherwise an error will occur. The dataset will be split into n\n Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the\n original dataset.\n If after rounding:\n\n - Any size equals 0, an error will occur.\n - The sum of split sizes < K, the difference will be added to the first split.\n - The sum of split sizes > K, the difference will be removed from the first large\n enough split such that it will have at least 1 row after removing the difference.\n\n randomize (bool, optional): Determines whether or not to split the data randomly (default=True).\n If True, the data will be randomly split. Otherwise, each split will be created with\n consecutive rows from the dataset.\n\n Note:\n 1. There is an optimized split function, which will be called automatically when the dataset\n that calls this function is a MappableDataset.\n 2. Dataset should not be sharded if split is going to be called. Instead, create a\n DistributedSampler and specify a split to shard after splitting. 
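As a sketch of the non-random case mentioned above (assuming the usual `import mindspore.dataset as ds`; the directory name is illustrative), consecutive rows can be split deterministically with randomize=False:
>>> # split into consecutive 80%/20% partitions; shuffle is off so the split is reproducible
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)
>>> train_dataset, test_dataset = dataset.split([0.8, 0.2], randomize=False)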
If the dataset is\n sharded after a split, it is strongly recommended setting the same seed in each instance\n of execution, otherwise each shard may not be part of the same split (see Examples).\n 3. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.\n Shuffling the dataset may not be deterministic, which means the data in each split\n will be different in each epoch. Furthermore, if sharding occurs after split, each\n shard may not be part of the same split.\n\n Raises:\n RuntimeError: If get_dataset_size returns None or is not supported for this dataset.\n RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not\n equal the dataset size.\n RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.\n RuntimeError: If the dataset is sharded prior to calling split.\n ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the\n floats don't sum to 1.\n\n Returns:\n tuple(Dataset), a tuple of datasets that have been split.\n\n Examples:\n >>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!\n >>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)\n >>>\n >>> # Set the seed, and tell split to use this seed when randomizing.\n >>> # This is needed because sharding will be done later\n >>> ds.config.set_seed(58)\n >>> train_dataset, test_dataset = dataset.split([0.9, 0.1])\n >>>\n >>> # To shard the train dataset, use a DistributedSampler\n >>> train_sampler = ds.DistributedSampler(10, 2)\n >>> train_dataset.use_sampler(train_sampler)\n \"\"\"\n if self.is_shuffled():\n logger.warning(\"Dataset is shuffled before split.\")\n\n if self.is_sharded():\n raise RuntimeError(\"Dataset should not be sharded before split.\")\n\n absolute_sizes = self._get_absolute_split_sizes(sizes)\n splits = []\n current_split_start_index = 0\n for size in absolute_sizes:\n ds = copy.deepcopy(self)\n ds.dataset_size = None\n if randomize:\n # want to shuffle the same way every epoch before split, we are assuming\n # that the user will call set_seed\n random_sampler = samplers.RandomSampler()\n random_sampler.reshuffle_each_epoch = False\n ds.add_sampler(random_sampler)\n\n subset_sampler = samplers.SequentialSampler(current_split_start_index, size)\n ds.add_sampler(subset_sampler)\n\n # add sequential sampler, so that if user calls use_sampler, we will\n # get rid of the sequential sampler instead of something we need\n ds.add_sampler(samplers.SequentialSampler())\n\n splits.append(ds)\n\n current_split_start_index += size\n\n return tuple(splits)\n\n\nclass BucketBatchByLengthDataset(Dataset):\n \"\"\"\n The result of applying BucketBatchByLength operator to the input dataset.\n \"\"\"\n\n def __init__(self, input_dataset, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function,\n pad_info, pad_to_bucket_boundary, drop_remainder):\n super().__init__(children=input_dataset)\n\n self.column_names = to_list(column_names)\n self.bucket_boundaries = replace_none(bucket_boundaries, [])\n self.bucket_batch_sizes = replace_none(bucket_batch_sizes, [])\n self.element_length_function = element_length_function\n self.pad_info = replace_none(pad_info, {})\n self.pad_to_bucket_boundary = replace_none(pad_to_bucket_boundary, False)\n self.drop_remainder = replace_none(drop_remainder, False)\n\n def parse(self, children=None):\n return cde.BucketBatchByLengthNode(children[0], self.column_names, 
self.bucket_boundaries,\n self.bucket_batch_sizes, self.element_length_function, self.pad_info,\n self.pad_to_bucket_boundary, self.drop_remainder)\n\n\nclass BatchDataset(Dataset):\n \"\"\"\n The result of applying Batch operator to the input dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be batched.\n batch_size (Union[int, function]): The number of rows each batch is created with. An\n int or callable which takes exactly 1 parameter, BatchInfo.\n drop_remainder (bool, optional): Determines whether or not to drop the last\n possibly incomplete batch (default=False). If True, and if there are less\n than batch_size rows available to make the last batch, then those rows will\n be dropped and not propagated to the child node.\n num_parallel_workers (int, optional): Number of workers to process the dataset in parallel (default=None).\n per_batch_map (callable, optional): Per batch map callable. A callable which takes\n (list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch of\n Tensors on a given column. The number of lists should match with number of entries in input_columns. The\n last parameter of the callable must always be a BatchInfo object.\n input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list must\n match with signature of per_batch_map callable.\n output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by\n the last operation. This parameter is mandatory if len(input_columns) !=\n len(output_columns). The size of this list must match the number of output\n columns of the last operation. (default=None, output columns will have the same\n name as the input columns, i.e., the columns will be replaced).\n column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole\n dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here\n is not just the columns specified in parameter input_columns and output_columns.\n pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={\"col1\":([224,224],0)}\n will pad column with name \"col1\" to a tensor of size [224,224] and fill the missing with 0.\n max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy\n data between processes. 
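For illustration, pad_info is typically supplied through Dataset.batch(); a sketch, with the column name "col1" purely illustrative:
>>> # pad column "col1" to shape [224, 224], filling missing values with 0, while forming batches of 32
>>> dataset = dataset.batch(batch_size=32, drop_remainder=True, pad_info={"col1": ([224, 224], 0)})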
This is only used if python_multiprocessing is set to True (default=16).\n\n \"\"\"\n\n def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,\n input_columns=None, output_columns=None, column_order=None, pad_info=None,\n python_multiprocessing=False, max_rowsize=16):\n super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)\n\n if BatchDataset._is_ancestor_of_repeat(input_dataset):\n logger.warning(\"Repeat is located before batch, data from two epochs can be batched together.\")\n\n BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)\n\n # if batch_size is callable, set batch_size to 1 and batch_size_func to that callable function\n self.batch_size = batch_size if not callable(batch_size) else 1\n self.batch_size_func = None if not callable(batch_size) else batch_size\n\n self.drop_remainder = replace_none(drop_remainder, False)\n\n self.per_batch_map = per_batch_map\n\n self.input_columns = to_list(input_columns)\n self.output_columns = to_list(output_columns)\n self.column_order = to_list(column_order)\n\n self.pad = bool(pad_info is not None)\n self.pad_info = replace_none(pad_info, dict())\n\n self.python_multiprocessing = python_multiprocessing\n self.process_pool = None\n self.hook = None\n self.pids = []\n self.eot = None\n self.watch_dog = None\n self.max_rowsize = max_rowsize\n\n def parse(self, children=None):\n return cde.BatchNode(children[0], self.batch_size, self.drop_remainder, self.pad, self.input_columns,\n self.output_columns, self.column_order, self.batch_size_func, self.per_batch_map,\n self.pad_info)\n\n @staticmethod\n def _is_ancestor_of_repeat(dataset):\n \"\"\"\n Utility function to find the case where repeat is used before batch.\n\n Args:\n dataset (Dataset): Dataset to be checked.\n\n Returns:\n bool, whether repeat is used before batch.\n \"\"\"\n if isinstance(dataset, RepeatDataset):\n return True\n flag = False\n for input_dataset in dataset.children:\n flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)\n return flag\n\n @staticmethod\n def _update_batch_size_for_syncwait(dataset, batch_size):\n \"\"\"\n Utility function to notify batch size to sync_wait.\n\n Args:\n dataset (Dataset): Dataset to be checked.\n batch_size (int): batch size to notify.\n \"\"\"\n if isinstance(dataset, SyncWaitDataset):\n dataset.update_sync_batch_size(batch_size)\n for input_dataset in dataset.children:\n BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)\n\n def __deepcopy__(self, memodict):\n return self.__safe_deepcopy__(memodict, exclude=(\"per_batch_map\", \"batch_size_func\", \"__transfer_dataset__\"))\n\n # Iterator bootstrap will be called on iterator construction.\n # A deep copy of Dataset object is created prior of iterator_bootstrap.\n # This method will create per iterator process pool and bind pyfunc execution to the pool.\n def iterator_bootstrap(self):\n \"\"\"\n Per iterator bootstrap callback.\n \"\"\"\n if self.python_multiprocessing:\n if self.per_batch_map is None:\n logger.warning(\"per_batch_map is None so python_multiprocessing does not work.\")\n return\n arg_q_list = []\n res_q_list = []\n\n # If user didn't specify num_parallel_workers, set it to default\n if self.num_parallel_workers is not None:\n num_parallel = self.num_parallel_workers\n else:\n num_parallel = get_num_parallel_workers()\n\n if get_enable_shared_mem():\n _check_shm_usage(num_parallel, 1, self.max_rowsize * self.batch_size, 2)\n for _ in 
range(num_parallel):\n arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))\n res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))\n\n # Construct pool with the callable list\n # The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses\n self.process_pool = multiprocessing.Pool(processes=num_parallel,\n initializer=_pyfunc_worker_init,\n initargs=([self.per_batch_map], arg_q_list, res_q_list))\n\n idx = 0\n global _OP_NAME, _OP_PROCESS, _LOCK\n op_id = _OP_NAME[str(self)]\n process_id = {op_id: [self.num_parallel_workers, set()]}\n # obtain process id from multiprocessing.pool\n for pool in self.process_pool._pool: # pylint: disable=W0212\n process_id[op_id][1].add(pool.pid)\n self.pids.append(pool.pid)\n with _LOCK:\n _OP_PROCESS.update(process_id)\n\n # Wrap per_batch_map into _PythonCallable\n self.per_batch_map = _PythonCallable(self.per_batch_map, idx, self.process_pool, arg_q_list, res_q_list)\n self.hook = _ExceptHookHandler()\n atexit.register(_mp_pool_exit_preprocess)\n # If Python version greater than 3.8, we need to close ThreadPool in atexit for unclean pool teardown.\n if sys.version_info >= (3, 8):\n atexit.register(self.process_pool.close)\n if platform.system().lower() != 'windows':\n self.eot = threading.Event()\n self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))\n self.watch_dog.daemon = True\n self.watch_dog.start()\n else:\n if self.per_batch_map is not None:\n self.per_batch_map = FuncWrapper(self.per_batch_map)\n\n def _abort_watchdog(self):\n if not self.eot.is_set():\n self.eot.set()\n\n def __del__(self):\n if hasattr(self, 'process_pool') and self.process_pool is not None:\n self.process_pool.close()\n if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:\n self._abort_watchdog()\n\n\nclass BatchInfo(cde.CBatchInfo):\n \"\"\"\n The information object associates with the current batch of tensors.\n \"\"\"\n\n def get_batch_num(self):\n \"\"\"\n Return the batch number of the current batch.\n \"\"\"\n return\n\n def get_epoch_num(self):\n \"\"\"\n Return the epoch number of the current batch.\n \"\"\"\n return\n\n\nclass BlockReleasePair:\n \"\"\"\n The blocking condition class used by SyncWaitDataset.\n\n Args:\n init_release_rows (int): Number of lines to allow through the pipeline.\n callback (function): The callback function that will be called when release is called (default=None).\n \"\"\"\n\n def __init__(self, init_release_rows, callback=None):\n if isinstance(init_release_rows, int) and init_release_rows <= 0:\n raise ValueError(\"release_rows need to be greater than 0.\")\n self.row_count = -init_release_rows\n self.cv = threading.Condition()\n self.callback = callback\n self.default_rows = init_release_rows\n self.disable = False\n\n def __deepcopy__(self, memodict):\n return self\n\n def reset(self):\n with self.cv:\n self.row_count = -self.default_rows\n self.cv.notify_all()\n\n def update_batched_size(self, batch_size):\n # sanity check\n if isinstance(batch_size, int) and batch_size <= 0:\n raise ValueError(\"batch_size need to be greater than 0.\")\n\n # should only use before the pipeline creates\n self.row_count *= batch_size\n self.default_rows *= batch_size\n\n def block_func(self):\n \"\"\"\n Function for handing blocking condition.\n\n Returns:\n bool, True.\n \"\"\"\n with self.cv:\n # if disable is true, the always evaluate to true\n not_time_out = 
self.cv.wait_for(lambda: (self.row_count < 0 or self.disable),\n timeout=get_callback_timeout())\n # time_out will be False if time out occurs\n if not not_time_out:\n logger.warning(\"Timeout happened in sync_wait, maybe dataset.sync_update(condition=...) \"\n \"is not added after dataset.create_dict_iterator(...), now disabling lock.\")\n self.disable = True\n self.row_count += 1\n return True\n\n def release_func(self, pass_rows=None, data=None):\n with self.cv:\n if pass_rows is None:\n pass_rows = self.default_rows\n self.row_count -= pass_rows\n if self.callback is not None:\n self.callback(data)\n self.cv.notify_all()\n\n def disable_lock(self):\n with self.cv:\n self.disable = True\n self.cv.notify_all()\n\n\nclass SyncWaitDataset(Dataset):\n \"\"\"\n The result of adding a blocking condition to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input dataset to apply flow control.\n num_batch (int): Number of batches without blocking at the start of each epoch.\n condition_name (str): Condition name that is used to toggle sending next row.\n callback (function): Callback function that will be invoked when sync_update is called (default=None).\n\n Raises:\n RuntimeError: If condition name already exists.\n \"\"\"\n\n def __init__(self, input_dataset, condition_name, num_batch, callback=None):\n super().__init__(children=input_dataset)\n\n # set to the default value, waiting for the batch to update it\n self._condition_name = condition_name\n if isinstance(num_batch, int) and num_batch <= 0:\n raise ValueError(\"num_batch need to be greater than 0.\")\n\n self._pair = BlockReleasePair(num_batch, callback)\n if self._condition_name in self.children[0].get_sync_notifiers():\n raise RuntimeError(\"Condition name is already in use.\")\n logger.info(\"Please remember to add dataset.sync_update(condition=%s), otherwise hanging will result. 
\"\n \"If dataset.sync_update(condition=%s) has already been added, you can ignore the info.\",\n condition_name, condition_name)\n\n def parse(self, children=None):\n return cde.SyncWaitNode(children[0], self._condition_name, self._pair.block_func)\n\n def get_sync_notifiers(self):\n return {**self.children[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}\n\n def is_sync(self):\n return True\n\n def update_sync_batch_size(self, batch_size):\n if isinstance(batch_size, int) and batch_size <= 0:\n raise ValueError(\"num_batch need to be greater than 0.\")\n self._pair.update_batched_size(batch_size)\n\n def disable_sync(self):\n logger.info(\"Disabling Sync\")\n self._pair.disable_lock()\n\n @staticmethod\n def _is_ancestor_of_batch(dataset):\n \"\"\"\n Utility function to find the case where sync_wait is used before batch.\n\n Args:\n dataset (Dataset): Dataset to be checked.\n\n Returns:\n bool, whether sync_wait is used before batch.\n \"\"\"\n if isinstance(dataset, BatchDataset):\n return True\n flag = False\n for input_dataset in dataset.children:\n flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)\n return flag\n\n def iterator_bootstrap(self):\n self._pair.reset()\n\n\nclass ShuffleDataset(Dataset):\n \"\"\"\n The result of applying Shuffle operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be shuffled.\n buffer_size (int): Size of the buffer.\n\n Raises:\n RuntimeError: If exist sync operators before shuffle.\n \"\"\"\n\n def __init__(self, input_dataset, buffer_size):\n super().__init__(children=input_dataset)\n self.buffer_size = buffer_size\n self.reshuffle_each_epoch = True\n\n if self.is_sync():\n raise RuntimeError(\"No shuffle after sync operators.\")\n\n def parse(self, children=None):\n return cde.ShuffleNode(children[0], self.buffer_size, self.reshuffle_each_epoch)\n\n def is_shuffled(self):\n return True\n\n\n# This wait function is for cleaning zombie subprocesses\ndef wait_pid():\n try:\n while True:\n child_pid, _ = os.waitpid(-1, os.WNOHANG)\n if child_pid == 0:\n break\n except OSError:\n # waitpid may be failed for some reasons so we ignore this error\n pass\n\n\n# Dataset need _watch_dog thread to monitoring fork multi-processing,\n# and thread can't be a member function otherwise python won't collect and release resources.\ndef _watch_dog(eot, pids):\n \"\"\"\n This thread is for monitoring subprocesses forked by GeneratorDataset/MapDataset/BatchDataset\n \"\"\"\n while not eot.is_set():\n subprocess_exit_num = 0\n # Monitoring and count how many subprocesses already exit\n for pid in pids:\n try:\n p = psutil.Process(pid)\n if p.status() == psutil.STATUS_ZOMBIE:\n subprocess_exit_num += 1\n except psutil.NoSuchProcess:\n subprocess_exit_num += 1\n # If find subprocess exit, we will wait for 30s and do some waitpid operations\n if subprocess_exit_num > 0:\n start = time.time()\n while time.time() - start < 30:\n # We need to distinguishing get_dataset_size or train finished normally and hang scenario.\n # If get_dataset_size or train finished normally, _stop_subprocess can be execute and\n # self.need_abort can be set to True. 
If main process is hang in get(), self.need_abort\n # will never set to True, then we wait for 30s and kill main process\n if eot.is_set():\n return\n # Sometimes subprocess may be zombie, so in 30s we can wait and do some useful tasks(waitpid).\n wait_pid()\n ## multiprocessing.queue may hang in .get() forever when put() process was killed.\n ## We have to exit main process otherwise main process will hang.\n logger.exception(\"The subprocess of dataset may exit unexpected or be killed, \"\n \"main process will exit.\")\n os.kill(os.getpid(), signal.SIGTERM)\n\n\n# Pyfunc collection for multiprocess pyfunc\n# This global variable will only be used within subprocesses\n_GLOBAL_PYFUNC_LIST = []\n_ARGS_QUEUE = []\n_RET_QUEUE = []\n_OP_NAME = dict()\n_OP_PROCESS = dict()\n_LOCK = threading.Lock()\n\n\n# Pyfunc worker init function\n# Python multiprocessing library forbid sending lambda function through pipe.\n# This init function allow us to add all Python function to a global collection and then fork afterwards.\ndef _pyfunc_worker_init(pyfunc_list, args_queue, ret_queue):\n global _GLOBAL_PYFUNC_LIST\n global _ARGS_QUEUE\n global _RET_QUEUE\n _GLOBAL_PYFUNC_LIST = pyfunc_list\n _ARGS_QUEUE = args_queue\n _RET_QUEUE = ret_queue\n\n\n# Pyfunc worker execution function\n# All exceptions will be raised to main processes\ndef _pyfunc_worker_exec(index, qid, *args):\n \"\"\"\n Internal function for call certain pyfunc in Python process.\n \"\"\"\n # Some threads in multiprocess.pool can't process sigint signal,\n # and will occur hang problem, so ctrl+c will pass to parent process.\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n if qid != -1:\n # Pass arguments through the Queue instead of directly to remote process\n args = _ARGS_QUEUE[qid].get()\n try:\n r = _GLOBAL_PYFUNC_LIST[index](*args)\n except Exception:\n return ExceptionHandler(where=\"in map(or batch) worker and execute python function\")\n if isinstance(r, tuple):\n _RET_QUEUE[qid].put(r)\n else:\n _RET_QUEUE[qid].put((r,))\n return [qid]\n # not using shared memory for passing arguments, call function directly\n result = None\n try:\n result = _GLOBAL_PYFUNC_LIST[index](*args)\n except Exception:\n result = ExceptionHandler(where=\"in map(or batch) worker and execute python function\")\n return result\n\n\n# PythonCallable wrapper for multiprocess pyfunc\nclass _PythonCallable:\n \"\"\"\n Internal Python function wrapper for multiprocessing pyfunc.\n \"\"\"\n\n def __init__(self, py_callable, idx, pool=None, arg_q=None, res_q=None):\n # Original Python callable from user.\n self.py_callable = py_callable\n # Process pool created for current iterator.\n self.pool = pool\n # Python callable index for subprocess _GLOBAL_PYFUNC_LIST\n self.idx = idx\n\n if pool is not None:\n self.queuemap = {}\n self.arg_q = arg_q\n self.res_q = res_q\n self.next_queue = 0\n\n def __call__(self, *args):\n if self._pool_is_running() and check_iterator_cleanup() is False:\n # arg_q will have 0 size if we are not using shared memory\n # if using multi-processing shared queue instead of multiprocess arg passing\n if self.arg_q != []:\n tid = threading.get_ident()\n # Need to register each thread to use a different queue to send data to pool\n if not tid in self.queuemap:\n qid = self.next_queue\n self.next_queue = self.next_queue + 1\n self.queuemap[tid] = qid\n else:\n qid = self.queuemap[tid]\n self.arg_q[qid].put(args)\n\n # This call will send the tensors along with Python callable index to the process pool.\n # Block, yield GIL. 
Current thread will reacquire GIL once result is returned.\n if self._pool_is_running() and check_iterator_cleanup() is False:\n result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, qid, []])\n else:\n return self.py_callable(*args)\n else:\n result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, -1, *args])\n\n # todo this check might be wrong\n while check_iterator_cleanup() is False:\n try:\n if self.arg_q != []:\n r = result.get(30)\n if isinstance(r, ExceptionHandler):\n r.reraise()\n if r[0] != qid:\n raise Exception(\"In PyCallable, got results from wrong thread\")\n r = self.res_q[qid].get()\n return r\n r = result.get(30)\n if isinstance(r, ExceptionHandler):\n r.reraise()\n return r\n except multiprocessing.TimeoutError:\n continue\n except KeyboardInterrupt:\n _set_iterator_cleanup()\n self.pool.close()\n self.pool.join()\n raise Exception(\"Multiprocess MapOp worker receives KeyboardInterrupt.\")\n return (None,)\n # Invoke original Python callable in master process in case the pool is gone.\n return self.py_callable(*args)\n\n def to_json(self):\n return self.py_callable.to_json()\n\n def _pool_is_running(self):\n # note here: the RUN state of python3.7 and python3.8 is different:\n # python3.7: RUN = 0\n # python3.8: RUN = \"RUN\"\n # so we use self.pool._state == RUN instead and we can't use _state == 0 any more.\n if self.pool is not None and self.pool._state == RUN: # pylint: disable=W0212\n return True\n return False\n\n\ndef _mp_pool_exit_preprocess():\n if check_iterator_cleanup() is False:\n # Set the iterator_cleanup flag to True before exiting, and wait 3s for all apply_async\n # applied to the multiprocessing task to prevent multiprocessing from hang when exiting\n _set_iterator_cleanup()\n time.sleep(3)\n\n\nclass _ExceptHookHandler:\n def __init__(self):\n sys.excepthook = self.__handler_exception\n\n def __handler_exception(self, ex_type, value, tb):\n logger.exception(\"Uncaught exception: \", exc_info=(ex_type, value, tb))\n _mp_pool_exit_preprocess()\n\n\nclass MapDataset(Dataset):\n \"\"\"\n The result of applying the Map operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be mapped.\n operations (TensorOp): A function mapping a nested structure of tensors\n to another nested structure of tensor (default=None).\n input_columns (Union[str, list[str]]): List of names of the input columns\n (default=None, the operations will be applied on the first columns in the dataset).\n The size of the list should match the number of inputs of the first operator.\n output_columns (Union[str, list[str]], optional): List of names of the output columns.\n The size of the list should match the number of outputs of the last operator\n (default=None, output columns will be the input columns, i.e., the columns will\n be replaced).\n column_order (list[str], optional): Specifies the list of all the columns you need in the whole\n dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here\n is not just the columns specified in parameter input_columns and output_columns.\n num_parallel_workers (int, optional): Number of workers to process the dataset\n in parallel (default=None).\n python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker process. 
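A sketch of the multiprocessing case described here, assuming the dataset was built with `import mindspore.dataset as ds`; the function and column names are illustrative:
>>> def heavy_py_transform(image):
...     # stand-in for a computationally heavy pure-Python operation
...     return image
>>> dataset = dataset.map(operations=heavy_py_transform, input_columns=["image"],
...                       num_parallel_workers=4, python_multiprocessing=True)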
This\n option could be beneficial if the Python operation is computational heavy (default=False).\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)\n max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy\n data between processes. This is only used if python_multiprocessing is set to True (default=16).\n\n Raises:\n ValueError: If len(input_columns) != len(output_columns) and column_order is not specified.\n \"\"\"\n\n def __init__(self, input_dataset, operations=None, input_columns=None, output_columns=None, column_order=None,\n num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16):\n super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers, cache=cache)\n self.operations = to_list(operations)\n self.operations = py_transforms.Compose.reduce(self.operations)\n self.input_columns = to_list(input_columns)\n self.output_columns = to_list(output_columns)\n self.column_order = replace_none(column_order, [])\n\n # If output_columns were not provided then use input_columns\n self.output_columns = self.input_columns if not self.output_columns else self.output_columns\n\n if self.input_columns and self.output_columns \\\n and len(self.input_columns) != len(self.output_columns) \\\n and not self.column_order:\n raise ValueError(\"When length of input_columns and output_columns are not equal,\"\n \" column_order must be specified.\")\n\n self.python_multiprocessing = python_multiprocessing\n self.process_pool = None\n self.hook = None\n self.pids = []\n self.eot = None\n self.watch_dog = None\n\n self.callbacks = to_list(callbacks)\n self.max_rowsize = max_rowsize\n\n def parse(self, children=None):\n operations = []\n for op in self.operations:\n if op and getattr(op, 'parse', None):\n operations.append(op.parse())\n else:\n operations.append(op)\n\n callbacks = [cb.create_runtime_obj() for cb in self.callbacks]\n return cde.MapNode(children[0], operations, self.input_columns, self.output_columns, self.column_order,\n callbacks)\n\n def __deepcopy__(self, memodict):\n return self.__safe_deepcopy__(memodict, exclude=(\"operations\", \"callbacks\", \"__transfer_dataset__\"))\n\n # Iterator bootstrap will be called on iterator construction.\n # A deep copy of Dataset object is created prior of iterator_bootstrap.\n # This method will create per iterator process pool and bind pyfunc execution to the pool.\n def iterator_bootstrap(self):\n \"\"\"\n Per iterator bootstrap callback.\n \"\"\"\n\n if self.python_multiprocessing:\n iter_specific_operations = []\n callable_list = []\n arg_q_list = []\n res_q_list = []\n\n # If user didn't specify num_parallel_workers, set it to default\n if self.num_parallel_workers is not None:\n num_parallel = self.num_parallel_workers\n else:\n num_parallel = get_num_parallel_workers()\n\n if get_enable_shared_mem():\n _check_shm_usage(num_parallel, 1, self.max_rowsize, 2)\n for _ in range(num_parallel):\n arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))\n res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))\n\n # Pass #1, look for Python callables and build list\n for op in self.operations:\n # our c transforms is now callable and should not be run in Python multithreading\n if callable(op) and str(op).find(\"c_transform\") < 
0:\n callable_list.append(op)\n\n if callable_list:\n # Construct pool with the callable list\n # The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses\n self.process_pool = multiprocessing.Pool(processes=num_parallel,\n initializer=_pyfunc_worker_init,\n initargs=(callable_list, arg_q_list, res_q_list))\n\n # Pass #2\n idx = 0\n global _OP_NAME, _OP_PROCESS, _LOCK\n op_id = _OP_NAME[str(self)]\n # obtain process id from multiprocessing.pool\n process_id = {op_id: [self.num_parallel_workers, set()]}\n for pool in self.process_pool._pool: # pylint: disable=W0212\n process_id[op_id][1].add(pool.pid)\n self.pids.append(pool.pid)\n with _LOCK:\n _OP_PROCESS.update(process_id)\n for op in self.operations:\n # our c transforms is now callable and should not be run in Python multithreading\n if callable(op) and str(op).find(\"c_transform\") < 0:\n # Wrap Python callable into _PythonCallable\n iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool,\n arg_q_list, res_q_list))\n idx += 1\n else:\n # CPP ops remain the same\n iter_specific_operations.append(op)\n self.operations = iter_specific_operations\n self.hook = _ExceptHookHandler()\n atexit.register(_mp_pool_exit_preprocess)\n # If Python version greater than 3.8, we need to close ThreadPool in atexit for unclean pool teardown.\n if sys.version_info >= (3, 8):\n atexit.register(self.process_pool.close)\n if platform.system().lower() != 'windows':\n self.eot = threading.Event()\n self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))\n self.watch_dog.daemon = True\n self.watch_dog.start()\n\n def _abort_watchdog(self):\n if not self.eot.is_set():\n self.eot.set()\n\n def __del__(self):\n if hasattr(self, 'process_pool') and self.process_pool is not None:\n self.process_pool.close()\n self.process_pool.join()\n if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:\n self._abort_watchdog()\n\n\nclass FilterDataset(Dataset):\n \"\"\"\n The result of applying filter predicate to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be mapped.\n predicate (callable): Python callable which returns a boolean value. 
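As a sketch (the column name "label" is illustrative), such a predicate is typically applied through Dataset.filter():
>>> # keep only rows whose "label" column is non-zero; rows where the predicate returns False are dropped
>>> dataset = dataset.filter(predicate=lambda label: label != 0, input_columns=["label"])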
If False then filter the element.\n input_columns (Union[str, list[str]], optional): List of names of the input columns\n (default=None, the predicate will be applied to all columns in the dataset).\n num_parallel_workers (int, optional): Number of workers to process the dataset\n in parallel (default=None).\n \"\"\"\n\n def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):\n super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)\n self.predicate = lambda *args: bool(predicate(*args))\n self.input_columns = to_list(input_columns)\n\n def parse(self, children=None):\n return cde.FilterNode(children[0], self.predicate, self.input_columns)\n\n\nclass RepeatDataset(Dataset):\n \"\"\"\n The result of applying Repeat operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be repeated.\n count (int): Number of times the dataset will be repeated (default=-1, repeat indefinitely).\n \"\"\"\n\n def __init__(self, input_dataset, count):\n super().__init__(children=input_dataset)\n self.count = replace_none(count, -1)\n\n def parse(self, children=None):\n return cde.RepeatNode(children[0], self.count)\n\n\nclass SkipDataset(Dataset):\n \"\"\"\n The result of applying Skip operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input dataset to have elements skipped.\n count (int): Number of elements to be skipped in the dataset.\n \"\"\"\n\n def __init__(self, input_dataset, count):\n super().__init__(input_dataset)\n self.count = count\n\n def parse(self, children=None):\n return cde.SkipNode(children[0], self.count)\n\n\nclass TakeDataset(Dataset):\n \"\"\"\n The result of applying Take operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to have elements taken from.\n count (int): Number of elements to be taken from the dataset.\n \"\"\"\n\n def __init__(self, input_dataset, count):\n super().__init__(children=input_dataset)\n self.count = count\n\n def parse(self, children=None):\n return cde.TakeNode(children[0], self.count)\n\n\nclass ZipDataset(Dataset):\n \"\"\"\n The result of applying Zip operator to the input Dataset.\n\n Args:\n datasets (tuple): A tuple of datasets to be zipped together.\n\n Raises:\n TypeError: If dataset is not an instance of Dataset.\n \"\"\"\n\n def __init__(self, datasets):\n super().__init__(children=datasets)\n\n def parse(self, children=None):\n return cde.ZipNode(children)\n\n def is_sync(self):\n return any([c.is_sync() for c in self.children])\n\n\nclass ConcatDataset(Dataset):\n \"\"\"\n The result of applying concat dataset operator to the input Dataset.\n\n Args:\n datasets (list): A list of datasets to be concatenated together.\n\n Raises:\n TypeError: If dataset is not an instance of Dataset.\n ValueError: If there is no samples in the one of the datasets.\n \"\"\"\n\n def __init__(self, datasets):\n super().__init__(children=datasets)\n for dataset in datasets:\n if not isinstance(dataset, Dataset):\n raise TypeError(\"Invalid dataset, expected Dataset object, but got %s!\" % type(dataset))\n self.datasets = datasets\n self._sampler = samplers.SequentialSampler(num_samples=None)\n\n self.children_sizes_ = [c.get_dataset_size() for c in self.children]\n child_index = 0\n for item in self.children_sizes_:\n if item == 0:\n raise ValueError(\"There are no samples in the dataset number %d. 
Please make sure there are \"\n \"valid samples in the dataset.\" % child_index)\n child_index += 1\n\n # _children_flag_and_nums: A list of pair<int ,int>.The first element of pair is flag that characterizes\n # whether the data set is mappable. The second element of pair is length of the dataset\n self._children_flag_and_nums = []\n\n # _children_start_end_index_: A list of pair<int ,int>.The elements of pair are used to characterize\n # the valid position of the dataset corresponding to the subscript when sampling\n self._children_start_end_index_ = []\n for index, child in enumerate(self.children):\n tem_list = [-1, -1]\n self._children_start_end_index_.append(tem_list)\n dataset_len = self.children_sizes_[index]\n if isinstance(child, GeneratorDataset) and not hasattr(child.source, \"__getitem__\"):\n dataset_len = 0\n self.children_sizes_[index] = 0\n\n if isinstance(child, MappableDataset):\n self._children_flag_and_nums.append((0, dataset_len))\n else:\n self._children_flag_and_nums.append((1, dataset_len))\n\n def parse(self, children=None):\n return cde.ConcatNode(children, self._sampler, self._children_flag_and_nums, self._children_start_end_index_)\n\n def use_sampler(self, sampler):\n \"\"\"\n Set the distributedSampler to concat dataset\n\n Args:\n sampler (Sampler): The sampler to use for the current dataset.\n Currently supported: DistributedSampler.\n\n Raises:\n TypeError: If the sampler is not an instance of DistributedSampler\n ValueError: If the parameter shuffle of sampler is True\n ValueError: If the parameter NumSamples of sampler is not None.\n ValueError: If num_shards <=0.\n \"\"\"\n if not isinstance(sampler, samplers.DistributedSampler):\n raise TypeError(\"The parameter %s of concat must be DistributedSampler!\" % sampler)\n\n if sampler.is_shuffled():\n raise ValueError(\"The parameter shuffle of DistributedSampler must be False!\")\n\n if sampler.num_shards <= 0:\n raise ValueError(\"The parameter num_shards of DistributedSampler must be positive int!\")\n\n if sampler.get_num_samples() is not None:\n raise ValueError(\"The parameter num_samples of DistributedSampler is not support to be set!\")\n\n self.dataset_size = None\n\n self._sampler = sampler\n cumulative_samples_nums = 0\n for index, child in enumerate(self.children):\n if hasattr(child, 'sampler') and child.sampler.get_num_samples() is not None:\n raise ValueError(\"The parameter NumSamples of %s is not support to be set!\" % child)\n\n if isinstance(child, BatchDataset):\n raise TypeError(\"The parameter %s of concat must not be BatchDataset!\" % child)\n\n # if child is mappable and the length is greater than 0\n if not self._children_flag_and_nums[index][0] and self._children_flag_and_nums[index][1]:\n\n tem_value = cumulative_samples_nums + self._children_flag_and_nums[index][1]\n\n if not self._children_flag_and_nums[index][1] >= sampler.num_shards:\n if tem_value < sampler.num_shards:\n self._children_start_end_index_[index][0] = cumulative_samples_nums\n self._children_start_end_index_[index][1] = tem_value\n else:\n self._children_start_end_index_[index][0] = cumulative_samples_nums\n self._children_start_end_index_[index][1] = tem_value % sampler.num_shards\n\n tem_sampler = copy.deepcopy(sampler)\n tem_sampler.set_offset(cumulative_samples_nums)\n child.use_sampler(tem_sampler)\n\n cumulative_samples_nums += self.children_sizes_[index]\n cumulative_samples_nums %= sampler.num_shards\n\n\nclass RenameDataset(Dataset):\n \"\"\"\n The result of applying Rename operator to the input 
Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be Renamed.\n input_columns (Union[str, list[str]]): List of names of the input columns.\n output_columns (Union[str, list[str]]): List of names of the output columns.\n \"\"\"\n\n def __init__(self, input_dataset, input_columns, output_columns):\n super().__init__(children=input_dataset)\n self.input_column_names = to_list(input_columns)\n self.output_column_names = to_list(output_columns)\n\n def parse(self, children=None):\n return cde.RenameNode(children[0], self.input_column_names, self.output_column_names)\n\n\ndef to_list(items):\n if items is None:\n return []\n if isinstance(items, tuple):\n return list(items)\n if not isinstance(items, list):\n return [items]\n return items\n\n\nclass ProjectDataset(Dataset):\n \"\"\"\n The result of applying Project operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be Projected.\n columns (Union[str, list[str]]): List of names of the columns to project.\n \"\"\"\n\n def __init__(self, input_dataset, columns):\n super().__init__(children=input_dataset)\n self.columns = to_list(columns)\n\n def parse(self, children=None):\n return cde.ProjectNode(children[0], self.columns)\n\n\nclass _ToDevice:\n \"\"\"\n Internal class to handle sending data to device.\n \"\"\"\n\n def __init__(self, dataset, num_epochs):\n ir_tree, self.api_tree = dataset.create_ir_tree()\n\n self._runtime_context = cde.PythonRuntimeContext()\n self._runtime_context.Init()\n self._to_device = cde.ToDevice(num_epochs)\n self._to_device.Init(ir_tree)\n self._runtime_context.AssignConsumer(self._to_device)\n\n ITERATORS_LIST.append(weakref.ref(self))\n _unset_iterator_cleanup()\n\n def send(self):\n self._to_device.Send()\n\n def stop_send(self):\n \"\"\"\n send stop send signal to pipeline, it is used when end of sequence is sent at the epoch end.\n \"\"\"\n self._to_device.StopSend()\n\n def continue_send(self):\n \"\"\"\n send continue send signal to pipeline, it is used when end of sequence is sent at the epoch end.\n \"\"\"\n self._to_device.ContinueSend()\n\n def get_data_info(self):\n \"\"\"\n Get type and shape of current batch.\n \"\"\"\n return self._to_device.GetDataInfo()\n\n def release(self):\n \"\"\"\n Manually terminate Device Queue instead of relying on out of scope destruction.\n \"\"\"\n if hasattr(self, '_runtime_context') and self._runtime_context:\n if hasattr(self, '_to_device') and self._to_device:\n self._runtime_context.Terminate()\n del self._to_device\n del self._runtime_context\n\n def __deepcopy__(self, memodict):\n return self\n\n\nclass TransferDataset(Dataset):\n \"\"\"\n The result of applying TDT operator to the input Dataset.\n\n Args:\n input_dataset (Dataset): Input Dataset to be transferred.\n send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).\n create_data_info_queue (bool, optional): Whether to create queue which stores\n types and shapes of data or not (default=False).\n\n Raises:\n TypeError: If device_type is empty.\n ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.\n RuntimeError: If dataset is unknown.\n \"\"\"\n\n def __init__(self, input_dataset, send_epoch_end=True, create_data_info_queue=False):\n super().__init__(children=input_dataset)\n self.queue_name = str(uuid.uuid1())\n self.device_type = context.get_context(\"device_target\") if context else \"CPU\"\n self.device_id = context.get_context(\"device_id\") if context else 0\n\n self._send_epoch_end = 
replace_none(send_epoch_end, True)\n self._create_data_info_queue = create_data_info_queue\n self._to_device = None\n\n def parse(self, children=None):\n total_batch = 0\n if hasattr(self.children[0], \"__total_batch__\"):\n total_batch = self.children[0].__total_batch__\n return cde.TransferNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,\n total_batch, self._create_data_info_queue)\n\n def create_dict_iterator(self, num_epochs=-1, output_numpy=False):\n raise RuntimeError(\"TransferDataset is not iterable.\")\n\n def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):\n raise RuntimeError(\"TransferDataset is not iterable.\")\n\n def __iter__(self):\n raise RuntimeError(\"TransferDataset is not iterable.\")\n\n def output_shapes(self):\n raise RuntimeError(\"TransferDataset does not support obtaining output_shapes.\")\n\n def output_types(self):\n raise RuntimeError(\"TransferDataset does not support obtaining output_types.\")\n\n @check_to_device_send\n def send(self, num_epochs=-1):\n \"\"\"\n Send to device\n \"\"\"\n if Dataset._noop_mode():\n return\n if self._to_device is not None:\n del self._to_device\n self._to_device = _ToDevice(self, num_epochs)\n self._to_device.send()\n\n def stop_send(self):\n if self._to_device is not None:\n self._to_device.stop_send()\n\n def continue_send(self):\n if self._to_device is not None:\n self._to_device.continue_send()\n\n def get_data_info(self):\n \"\"\"\n Get type and shape of current batch\n \"\"\"\n if self._to_device is not None:\n return self._to_device.get_data_info()\n raise RuntimeError(\"Calling get_data_info with bad state.\")\n\n def release(self):\n \"\"\"\n Manually terminate Device Queue instead of relying on out of scope destruction.\n \"\"\"\n if self._to_device is not None:\n self._to_device.release()\n\n\nclass RangeDataset(MappableDataset):\n \"\"\"\n A source dataset that reads and parses datasets stored on disk in a range.\n\n Args:\n start (int): Starting index.\n stop (int): Ending index.\n step (int): Step size in the range specified by start and stop.\n \"\"\"\n\n def __init__(self, start, stop, step):\n super().__init__()\n self.start = start\n self.stop = stop\n self.step = step\n\n def parse(self, children=None):\n raise NotImplementedError(\"Dataset has to implement parse method.\")\n\n def is_shuffled(self):\n return False\n\n def is_sharded(self):\n return False\n\n def get_dataset_size(self):\n if self.dataset_size is None:\n self.dataset_size = math.ceil((self.stop - self.start) / self.step)\n return self.dataset_size\n\n\nclass FashionMnistDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the FASHION-MNIST dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. 
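For example (a sketch; the directory path is illustrative and `ds` is the usual `mindspore.dataset` alias), the test split alone can be selected with usage="test":
>>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, usage="test")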
`train` will read from 60,000\n train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.\n (default=None, will read all samples)\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> fashion_mnist_dataset_dir = \"/path/to/fashion_mnist_dataset_directory\"\n >>>\n >>> # Read 3 samples from FASHIONMNIST dataset\n >>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, num_samples=3)\n >>>\n >>> # Note: In FASHIONMNIST dataset, each dictionary has keys \"image\" and \"label\"\n\n About Fashion-MNIST dataset:\n\n Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and\n a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.\n We intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking\n machine learning algorithms. It shares the same image size and structure of training and testing splits.\n\n Here is the original Fashion-MNIST dataset structure.\n You can unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── fashionmnist_dataset_dir\n ├── t10k-images-idx3-ubyte\n ├── t10k-labels-idx1-ubyte\n ├── train-images-idx3-ubyte\n └── train-labels-idx1-ubyte\n\n Citation:\n\n .. 
code-block::\n\n @online{xiao2017/online,\n author = {Han Xiao and Kashif Rasul and Roland Vollgraf},\n title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms},\n date = {2017-08-28},\n year = {2017},\n eprintclass = {cs.LG},\n eprinttype = {arXiv},\n eprint = {cs.LG/1708.07747},\n }\n \"\"\"\n\n @check_mnist_cifar_dataset\n def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,\n sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.FashionMnistNode(self.dataset_dir, self.usage, self.sampler)\n\n\nclass ImageFolderDataset(MappableDataset):\n \"\"\"\n A source dataset that reads images from a tree of directories.\n All images within one folder have the same label.\n\n The generated dataset has two columns: :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is of a scalar of uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n extensions (list[str], optional): List of file extensions to be\n included in the dataset (default=None).\n class_indexing (dict, optional): A str-to-int mapping from folder name to index\n (default=None, the folder names will be sorted\n alphabetically and each class will be given a\n unique index starting from 0).\n decode (bool, optional): Decode the images after reading (default=False).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n RuntimeError: If class_indexing is not a dictionary.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.\n - This dataset can take in a `sampler`. 
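A sketch of passing a sampler instead of shuffle (assuming the usual `ds` alias; the sample count is illustrative):
>>> sampler = ds.RandomSampler(num_samples=64)
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir, sampler=sampler)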
`sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> image_folder_dataset_dir = \"/path/to/image_folder_dataset_directory\"\n >>>\n >>> # 1) Read all samples (image files) in image_folder_dataset_dir with 8 threads\n >>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,\n ... num_parallel_workers=8)\n >>>\n >>> # 2) Read all samples (image files) from folder cat and folder dog with label 0 and 1\n >>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,\n ... class_indexing={\"cat\":0, \"dog\":1})\n >>>\n >>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG and .png (case sensitive)\n >>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,\n ... extensions=[\".JPEG\", \".png\"])\n\n About ImageFolderDataset:\n\n You can construct the following directory structure from your dataset files and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── image_folder_dataset_directory\n ├── class1\n │ ├── 000000000001.jpg\n │ ├── 000000000002.jpg\n │ ├── ...\n ├── class2\n │ ├── 000000000001.jpg\n │ ├── 000000000002.jpg\n │ ├── ...\n ├── class3\n │ ├── 000000000001.jpg\n │ ├── 000000000002.jpg\n │ ├── ...\n ├── classN\n ├── ...\n \"\"\"\n\n @check_imagefolderdataset\n def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, sampler=None,\n extensions=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.extensions = replace_none(extensions, [])\n self.class_indexing = replace_none(class_indexing, {})\n self.decode = replace_none(decode, False)\n\n def parse(self, children=None):\n return cde.ImageFolderNode(self.dataset_dir, self.decode, self.sampler, self.extensions, self.class_indexing)\n\n\nclass MnistDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the MNIST dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . 
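# Illustrative sketch (not part of the original source): a minimal ImageFolderDataset
# pipeline with an explicit folder-name-to-label mapping and on-read decoding.
# The directory layout, class names, and extensions are hypothetical placeholders.
import mindspore.dataset as ds

image_folder_dir = "/path/to/image_folder_dataset_directory"  # hypothetical path
dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dir,
                                class_indexing={"cat": 0, "dog": 1},
                                extensions=[".jpg", ".png"],  # extension filter is case sensitive
                                decode=True,                  # decoded rows have shape [H, W, C]
                                num_parallel_workers=4,
                                shuffle=True)
print("number of samples:", dataset.get_dataset_size())
for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
    image, label = row["image"], row["label"]
    break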
`train` will read from 60,000\n train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.\n (default=None, will read all samples)\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> mnist_dataset_dir = \"/path/to/mnist_dataset_directory\"\n >>>\n >>> # Read 3 samples from MNIST dataset\n >>> dataset = ds.MnistDataset(dataset_dir=mnist_dataset_dir, num_samples=3)\n >>>\n >>> # Note: In mnist_dataset dataset, each dictionary has keys \"image\" and \"label\"\n\n About MNIST dataset:\n\n The MNIST database of handwritten digits has a training set of 60,000 examples,\n and a test set of 10,000 examples. It is a subset of a larger set available from\n NIST. The digits have been size-normalized and centered in a fixed-size image.\n\n Here is the original MNIST dataset structure.\n You can unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── mnist_dataset_dir\n ├── t10k-images-idx3-ubyte\n ├── t10k-labels-idx1-ubyte\n ├── train-images-idx3-ubyte\n └── train-labels-idx1-ubyte\n\n Citation:\n\n .. 
code-block::\n\n @article{lecun2010mnist,\n title = {MNIST handwritten digit database},\n author = {LeCun, Yann and Cortes, Corinna and Burges, CJ},\n journal = {ATT Labs [Online]},\n volume = {2},\n year = {2010},\n howpublished = {http://yann.lecun.com/exdb/mnist}\n }\n \"\"\"\n\n @check_mnist_cifar_dataset\n def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,\n sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.MnistNode(self.dataset_dir, self.usage, self.sampler)\n\n\nclass PhotoTourDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the PhotoTour dataset.\n\n The generated dataset with different usage has different output columns.\n If train, the generated dataset has one column :py:obj:`[image]`,\n else three columns :py:obj:`[image1, image2, matches]`.\n The tensor of column :py:obj:`image`, :py:obj:`image1` and :py:obj:`image2` is of the uint8 type.\n The tensor of column :py:obj:`matches` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n name (str): Name of the dataset to load,\n should be one of 'notredame', 'yosemite', 'liberty', 'notredame_harris',\n 'yosemite_harris' or 'liberty_harris'.\n usage (str, optional): Usage of the dataset, can be `train` or `test` (Default=None, will be set to 'train').\n When usage is `train`, number of samples for each `name` is\n {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, 'liberty_harris': 379587,\n 'yosemite_harris': 450912, 'notredame_harris': 325295}.\n When usage is `test`, will read 100,000 samples for testing.\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). 
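# Illustrative sketch (not part of the original source): reading MNIST in a sharded
# fashion with the num_shards/shard_id arguments documented above, as used for
# data-parallel training. The path and shard values are hypothetical; in a real job
# they usually come from the communication library.
import mindspore.dataset as ds

mnist_dir = "/path/to/mnist_dataset_directory"  # hypothetical path
# Shard 0 of 2: each shard sees a disjoint subset; num_samples (if set) applies per shard.
dataset = ds.MnistDataset(dataset_dir=mnist_dir, usage="train",
                          num_shards=2, shard_id=0, shuffle=True)
print("rows in this shard:", dataset.get_dataset_size())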
This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If dataset_dir is not exist.\n ValueError: If usage is not in [\"train\", \"test\"].\n ValueError: If name is not in [\"notredame\", \"yosemite\", \"liberty\",\n \"notredame_harris\", \"yosemite_harris\", \"liberty_harris\"].\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive. The table\n below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'\n :widths: 64 64 1\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> # Read 3 samples from PhotoTour dataset.\n >>> dataset = ds.PhotoTourDataset(dataset_dir=\"/path/to/photo_tour_dataset_directory\",\n ... name='liberty', usage='train', num_samples=3)\n >>>\n >>> # In PhotoTourDataset dataset, if usage is 'train', each dictionary has key \"image\",\n >>> # else has keys \"image1\" \"image2\" and \"matches\".\n\n About PhotoTour dataset:\n\n The data is taken from Photo Tourism reconstructions from Trevi Fountain (Rome), Notre Dame (Paris) and Half\n Dome (Yosemite). Each dataset consists of a series of corresponding patches, which are obtained by projecting\n 3D points from Photo Tourism reconstructions back into the original images.\n\n The dataset consists of 1024 x 1024 bitmap (.bmp) images, each containing a 16 x 16 array of image patches.\n Each patch is sampled as 64 x 64 grayscale, with a canonical scale and orientation. For details of how the scale\n and orientation is established, please see the paper. An associated metadata file info.txt contains the match\n information. Each row of info.txt corresponds to a separate patch, with the patches ordered from left to right and\n top to bottom in each bitmap image. The first number on each row of info.txt is the 3D point ID from which that\n patch was sampled -- patches with the same 3D point ID are projected from the same 3D point (into different images).\n The second number in info.txt corresponds to the image from which the patch was sampled, and is not used at present.\n\n You can unzip the original PhotoTour dataset files into this directory structure and read by MindSpore's API.\n\n .. 
code-block::\n .\n └── photo_tour_dataset_directory\n ├── liberty/\n │ ├── info.txt // two columns: 3D_point_ID, unused\n │ ├── m50_100000_100000_0.txt // seven columns: patch_ID1, 3D_point_ID1, unused1,\n │ │ // patch_ID2, 3D_point_ID2, unused2, unused3\n │ ├── patches0000.bmp // 1024*1024 pixels, with 16 * 16 patches.\n │ ├── patches0001.bmp\n │ ├── ...\n ├── yosemite/\n │ ├── ...\n ├── notredame/\n │ ├── ...\n ├── liberty_harris/\n │ ├── ...\n ├── yosemite_harris/\n │ ├── ...\n ├── notredame_harris/\n │ ├── ...\n\n Citation:\n\n .. code-block::\n\n @INPROCEEDINGS{4269996,\n author={Winder, Simon A. J. and Brown, Matthew},\n booktitle={2007 IEEE Conference on Computer Vision and Pattern Recognition},\n title={Learning Local Image Descriptors},\n year={2007},\n volume={},\n number={},\n pages={1-8},\n doi={10.1109/CVPR.2007.382971}\n }\n \"\"\"\n\n @check_photo_tour_dataset\n def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,\n shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.name = name\n self.usage = replace_none(usage, \"train\")\n\n def parse(self, children=None):\n return cde.PhotoTourNode(self.dataset_dir, self.name, self.usage, self.sampler)\n\n\nclass Places365Dataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the Places365 dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train-standard`, `train-challenge` or `val`\n (default=None, will be set to 'train-standard').\n small (bool, optional): Use 256 * 256 images (True) or high resolution images (False) (default=False).\n decode (bool, optional): Decode the images after reading (default=True).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). 
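# Illustrative sketch (not part of the original source): the PhotoTour column layout
# differs by usage, as described above -- one "image" column for `train`, and
# "image1"/"image2"/"matches" for `test`. Paths are hypothetical placeholders.
import mindspore.dataset as ds

photo_tour_dir = "/path/to/photo_tour_dataset_directory"  # hypothetical path
train_set = ds.PhotoTourDataset(dataset_dir=photo_tour_dir, name="liberty",
                                usage="train", num_samples=3)
test_set = ds.PhotoTourDataset(dataset_dir=photo_tour_dir, name="liberty",
                               usage="test", num_samples=3)
print(train_set.get_col_names())  # expected: ['image']
print(test_set.get_col_names())   # expected: ['image1', 'image2', 'matches']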
This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n ValueError: If usage is not in [\"train-standard\", \"train-challenge\", \"val\"].\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> place365_dataset_dir = \"/path/to/place365_dataset_directory\"\n >>>\n >>> # Read 3 samples from Places365 dataset\n >>> dataset = ds.Places365Dataset(dataset_dir=place365_dataset_dir, usage='train-standard',\n ... small=True, decode=True, num_samples=3)\n >>>\n >>> # In places365 dataset, each dictionary has keys \"image\" and \"label\".\n\n About Places365 dataset:\n\n Convolutional neural networks (CNNs) trained on the Places2 Database can be used for scene recognition as well as\n generic deep scene features for visual recognition.\n\n The author releases the data of Places365-Standard and the data of Places365-Challenge to the public.\n Places365-Standard is the core set of Places2 Database, which has been used to train the Places365-CNNs. The author\n will add other kinds of annotation on the Places365-Standard in the future. Places365-Challenge is the competition\n set of Places2 Database, which has 6.2 million extra images compared to the Places365-Standard.\n The Places365-Challenge will be used for the Places Challenge 2016.\n\n You can unzip the original Places365 dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n .\n └─├── categories_places365.txt\n ├── places365_train-standard.txt\n ├── places365_train-challenge.txt\n ├── val_large/\n │ ├── Places365_val_00000001.jpg\n │ ├── Places365_val_00000002.jpg\n │ ├── Places365_val_00000003.jpg\n │ ├── ...\n ├── val_256/\n │ ├── ...\n ├── data_large_standard/\n │ ├── ...\n ├── data_256_standard/\n │ ├── ...\n ├── data_large_challenge/\n │ ├── ...\n ├── data_256_challenge /\n │ ├── ...\n\n Citation:\n\n .. 
code-block::\n\n article{zhou2017places,\n title={Places: A 10 million Image Database for Scene Recognition},\n author={Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},\n journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},\n year={2017},\n publisher={IEEE}\n }\n \"\"\"\n\n @check_places365_dataset\n def __init__(self, dataset_dir, usage=None, small=True, decode=False, num_samples=None, num_parallel_workers=None,\n shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = os.path.abspath(dataset_dir)\n self.usage = replace_none(usage, \"train-standard\")\n self.small = small\n self.decode = decode\n\n def parse(self, children=None):\n return cde.Places365Node(self.dataset_dir, self.usage, self.small, self.decode, self.sampler)\n\n\nclass QMnistDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the QMNIST dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar when `compat` is True else a tensor both of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train`, `test`, `test10k`, `test50k`, `nist`\n or `all` (default=None, will read all samples).\n compat (bool, optional): Whether the label for each example is class number (compat=True) or the full QMNIST\n information (compat=False) (default=True).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
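# Illustrative sketch (not part of the original source): reading the 256x256 variant
# of Places365 with decoding enabled, per the `small` and `decode` arguments above.
# The path is a hypothetical placeholder.
import mindspore.dataset as ds

places365_dir = "/path/to/places365_dataset_directory"  # hypothetical path
dataset = ds.Places365Dataset(dataset_dir=places365_dir, usage="train-standard",
                              small=True, decode=True, num_samples=4)
for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["image"].shape, row["label"])  # decoded images are [H, W, C] uint8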
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> qmnist_dataset_dir = \"/path/to/qmnist_dataset_directory\"\n >>>\n >>> # Read 3 samples from QMNIST train dataset\n >>> dataset = ds.QMnistDataset(dataset_dir=qmnist_dataset_dir, num_samples=3)\n >>>\n >>> # Note: In QMNIST dataset, each dictionary has keys \"image\" and \"label\"\n\n About QMNIST dataset:\n\n The QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal to\n match the MNIST preprocessing as closely as possible.\n Through an iterative process, researchers tried to generate an additional 50k images of MNIST-like data.\n They started with a reconstruction process given in the paper and used the Hungarian algorithm to find the best\n matches between the original MNIST samples and their reconstructed samples.\n\n Here is the original QMNIST dataset structure.\n You can unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── qmnist_dataset_dir\n ├── qmnist-train-images-idx3-ubyte\n ├── qmnist-train-labels-idx2-int\n ├── qmnist-test-images-idx3-ubyte\n ├── qmnist-test-labels-idx2-int\n ├── xnist-images-idx3-ubyte\n └── xnist-labels-idx2-int\n\n Citation:\n\n .. code-block::\n\n @incollection{qmnist-2019,\n title = \"Cold Case: The Lost MNIST Digits\",\n author = \"Chhavi Yadav and L\\'{e}on Bottou\",\\\n booktitle = {Advances in Neural Information Processing Systems 32},\n year = {2019},\n publisher = {Curran Associates, Inc.},\n }\n \"\"\"\n\n @check_qmnist_dataset\n def __init__(self, dataset_dir, usage=None, compat=True, num_samples=None, num_parallel_workers=None,\n shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n self.compat = compat\n\n def parse(self, children=None):\n return cde.QMnistNode(self.dataset_dir, self.usage, self.compat, self.sampler)\n\n\nclass MindDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing MindRecord dataset.\n\n The columns of generated dataset depend on the source MindRecord files.\n\n Args:\n dataset_file (Union[str, list[str]]): If dataset_file is a str, it represents for\n a file name of one component of a mindrecord source, other files with identical source\n in the same path will be found and loaded automatically. 
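# Illustrative sketch (not part of the original source): the effect of `compat` on the
# QMNIST label column -- a scalar class id when compat=True, the fuller QMNIST label
# record when compat=False, as documented above. The path is a hypothetical placeholder.
import mindspore.dataset as ds

qmnist_dir = "/path/to/qmnist_dataset_directory"  # hypothetical path
compat_set = ds.QMnistDataset(dataset_dir=qmnist_dir, usage="test", compat=True, num_samples=1)
full_set = ds.QMnistDataset(dataset_dir=qmnist_dir, usage="test", compat=False, num_samples=1)
for row in compat_set.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["label"].shape)  # expected: () -- a scalar class id
for row in full_set.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["label"].shape)  # expected: a 1-D vector carrying the extra QMNIST fields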
If dataset_file is a list,\n it represents for a list of dataset files to be read directly.\n columns_list (list[str], optional): List of columns to be read (default=None).\n num_parallel_workers (int, optional): The number of readers (default=None).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=None, performs global shuffle).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are three levels of shuffling:\n\n - Shuffle.GLOBAL: Global shuffle of all rows of data in dataset.\n\n - Shuffle.FILES: Shuffle the file sequence but keep the order of data within each file.\n\n - Shuffle.INFILE: Keep the file sequence the same but shuffle the data within each file.\n\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, 'num_samples' reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, sampler is exclusive\n with shuffle and block_reader). Support list: SubsetRandomSampler,\n PkSampler, RandomSampler, SequentialSampler, DistributedSampler.\n padded_sample (dict, optional): Samples will be appended to dataset, where\n keys are the same as column_list.\n num_padded (int, optional): Number of padding samples. Dataset size\n plus num_padded should be divisible by num_shards.\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, all samples).\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
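# Illustrative sketch (not part of the original source): the three shuffle levels
# accepted by MindDataset, as listed above. The MindRecord file name is a
# hypothetical placeholder.
import mindspore.dataset as ds

mind_file = "/path/to/mind_dataset_file"  # hypothetical path to one MindRecord component
# Global shuffle of all rows (also the behaviour for shuffle=None or shuffle=True).
global_ds = ds.MindDataset(dataset_file=mind_file, shuffle=ds.Shuffle.GLOBAL)
# Shuffle the file order only, keeping the row order inside each file.
files_ds = ds.MindDataset(dataset_file=mind_file, shuffle=ds.Shuffle.FILES)
# Keep the file order, shuffle the rows inside each file.
infile_ds = ds.MindDataset(dataset_file=mind_file, shuffle=ds.Shuffle.INFILE)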
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> mind_dataset_dir = [\"/path/to/mind_dataset_file\"] # contains 1 or multiple MindRecord files\n >>> dataset = ds.MindDataset(dataset_file=mind_dataset_dir)\n \"\"\"\n\n def parse(self, children=None):\n return cde.MindDataNode(self.dataset_file, self.columns_list, self.sampler, self.new_padded_sample,\n self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))\n\n @check_minddataset\n def __init__(self, dataset_file, columns_list=None, num_parallel_workers=None, shuffle=None, num_shards=None,\n shard_id=None, sampler=None, padded_sample=None, num_padded=None, num_samples=None, cache=None):\n if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):\n raise TypeError(\"shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or \"\n \"'Shuffle.FILES' or 'Shuffle.INFILE'.\")\n self.shuffle_option = shuffle\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle_to_bool(shuffle), num_shards=num_shards, shard_id=shard_id, cache=cache)\n if isinstance(dataset_file, list):\n self.load_dataset = False\n else:\n self.load_dataset = True\n self.dataset_file = dataset_file\n self.columns_list = replace_none(columns_list, [])\n\n if shuffle is False:\n logger.warning(\"WARN: global shuffle is not used.\")\n\n if sampler is not None:\n if isinstance(sampler, (\n samplers.SubsetRandomSampler, samplers.SubsetSampler, samplers.PKSampler,\n samplers.DistributedSampler,\n samplers.RandomSampler, samplers.SequentialSampler)) is False:\n raise ValueError(\"The sampler is not supported yet.\")\n\n self.padded_sample = padded_sample\n self.num_padded = replace_none(num_padded, 0)\n\n self.new_padded_sample = {}\n if padded_sample:\n for k, v in padded_sample.items():\n if isinstance(v, np.ndarray):\n self.new_padded_sample[k] = v.tobytes()\n else:\n self.new_padded_sample[k] = v\n\n\ndef _iter_fn(dataset, num_samples):\n \"\"\"\n Generator function wrapper for iterable dataset.\n \"\"\"\n if num_samples is not None and num_samples != 0:\n ds_iter = iter(dataset)\n for _ in range(num_samples):\n try:\n val = next(ds_iter)\n except StopIteration:\n return\n # convert output tensors to ndarrays\n yield _convert_row(val)\n else:\n for val in dataset:\n # convert output tensors to ndarrays\n yield _convert_row(val)\n\n\ndef _generator_fn(generator, num_samples):\n \"\"\"\n Generator function wrapper for generator function dataset.\n \"\"\"\n if num_samples is not None and num_samples != 0:\n gen_iter = generator()\n for _ in range(num_samples):\n try:\n val = next(gen_iter)\n except StopIteration:\n return\n yield val\n else:\n gen_iter = generator()\n for val in gen_iter:\n yield val\n\n\ndef _cpp_sampler_fn(sample_ids, dataset):\n \"\"\"\n Generator function wrapper for mappable dataset with cpp sampler.\n \"\"\"\n if not isinstance(sample_ids, np.ndarray):\n raise RuntimeError(\"Sample IDs are not in a numpy array.\")\n if sample_ids.size == 0:\n raise RuntimeError(\"Sampler passed an empty sample IDs list.\")\n\n for i in sample_ids:\n val = 
dataset[i]\n # convert output tensors to ndarrays\n yield _convert_row(val)\n\n\ndef _cpp_sampler_fn_mp(sample_ids, sample_fn):\n \"\"\"\n Multiprocessing generator function wrapper for mappable dataset with cpp sampler.\n \"\"\"\n if not isinstance(sample_ids, np.ndarray):\n raise RuntimeError(\"Sample IDs are not in a numpy array.\")\n if sample_ids.size == 0:\n raise RuntimeError(\"Sampler passed an empty sample IDs list.\")\n\n return sample_fn.process(sample_ids)\n\n\ndef _fill_worker_indices(workers, indices, idx):\n \"\"\"\n Worker index queue filler, fill worker index queue in round robin order.\n \"\"\"\n num_worker = len(workers)\n while idx < len(indices):\n try:\n workers[idx % num_worker].put(indices[idx])\n idx += 1\n except queue.Full:\n break\n return idx\n\n\ndef _check_shm_usage(num_worker, queue_size, max_rowsize, num_queues=1):\n \"\"\"\n Check sufficient shared memory is available for shared memory queues\n when training in parallel mode.\n \"\"\"\n threshold_ratio = 0.8\n if platform.system() != \"Windows\":\n shm_estimate_usage = _get_device_num() * num_worker * num_queues * \\\n (queue_size + 2) * max_rowsize * 1024 * 1024\n try:\n shm_available = psutil.disk_usage('/dev/shm').free\n if shm_estimate_usage >= threshold_ratio * shm_available:\n raise RuntimeError(\n \"Insufficient shared memory available. Required: {}, Available: {}. \"\n \"The required memory can't exceed 80% of the available shared memory. \"\n \"Recommend to set_enable_shared_mem to False, reduce max_rowsize or reduce num_parallel_workers.\"\n .format(shm_estimate_usage, shm_available))\n except FileNotFoundError:\n logger.warning(\"Expected /dev/shm to exist.\")\n\n\ndef _convert_row(row):\n \"\"\"\n Convert Op return value to numpy\n \"\"\"\n value = []\n # convert each column in row into numpy array\n for x in row:\n if isinstance(x, bytes): # got image bytes from a file\n value.append(np.frombuffer(x, np.uint8))\n elif isinstance(x, Tensor): # got mindspore.Tensor\n value.append(x.asnumpy())\n else:\n value.append(np.array(x, copy=False))\n return tuple(value)\n\n\nclass SamplerFn:\n \"\"\"\n Multiprocessing or multithread generator function wrapper master process.\n \"\"\"\n\n def __init__(self, dataset, num_worker, multi_process, max_rowsize):\n self.workers = []\n self.num_worker = num_worker\n self.multi_process = multi_process\n self.need_join = False\n self.ppid = os.getpid()\n self.pids = []\n # Event for end of epoch\n if multi_process is True:\n try:\n self.eof = multiprocessing.Event()\n except Exception:\n raise RuntimeError(\"Init multiprocessing.Event() failed, This might be caused by insufficient shm,\"\n + \" and the recommended shm size is at least 5 GB.\")\n else:\n self.eof = threading.Event()\n # Create workers\n\n # get default queue size and adjust queuesize per worker if there are large # workers\n queue_size = get_prefetch_size()\n queue_size = min(queue_size, queue_size * 4 // num_worker)\n queue_size = max(2, queue_size)\n\n if multi_process and get_enable_shared_mem():\n _check_shm_usage(num_worker, queue_size, max_rowsize)\n for _ in range(num_worker):\n if multi_process is True:\n try:\n worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size)\n except Exception:\n raise RuntimeError(\"Init multiprocessing.Queue() failed, This might be caused by insufficient shm,\"\n + \" and the recommended shm size is at least 5 GB.\")\n worker.daemon = True\n # When multi processes fork a subprocess, the lock of the main process is copied to the subprocess,\n # 
which may cause deadlock. Therefore, the subprocess startup is performed in che initialization phase.\n # In this phase, the main process is not locked.\n worker.start()\n self.pids.append(worker.pid)\n self.need_join = True\n else:\n worker = _GeneratorWorkerMt(dataset, self.eof)\n worker.daemon = True\n self.workers.append(worker)\n if multi_process is True and platform.system().lower() != 'windows':\n self.eot = threading.Event()\n self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))\n self.watch_dog.daemon = True\n self.watch_dog.start()\n\n def process(self, indices):\n \"\"\"\n The main process, start the child process or child thread, and fill the index queue.\n Get the result and return.\n \"\"\"\n for w in self.workers:\n # Check whether the queue of the subprocess is empty.\n if not w.queue_empty():\n raise Exception(\"The queue of the subprocess is not empty.\")\n # Start all workers\n if not w.is_alive():\n w.start()\n\n # Fill initial index queues\n idx_cursor = 0\n idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)\n\n # Fetch results\n for i in range(len(indices)):\n if self.eof.is_set():\n self._stop_subprocess()\n return\n if self.multi_process is True and not psutil.pid_exists(self.workers[i % self.num_worker].pid):\n self._stop_subprocess()\n return\n # Fetch result and put index\n try:\n result = self.workers[i % self.num_worker].get()\n if isinstance(result, ExceptionHandler):\n result.reraise()\n except queue.Empty:\n self._stop_subprocess()\n raise Exception(\"Generator worker process timeout.\")\n except KeyboardInterrupt:\n self._stop_subprocess()\n raise Exception(\"Generator worker receives KeyboardInterrupt.\")\n if self.eof.is_set():\n self._stop_subprocess()\n return\n if idx_cursor < len(indices):\n idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)\n yield _convert_row(result)\n\n def _stop_subprocess(self):\n # Only the main process can call join\n if self.need_join is True and self.ppid == os.getpid():\n self.eof.set()\n self.need_join = False\n for w in self.workers:\n if psutil.pid_exists(w.pid):\n w.join()\n self._abort_watchdog()\n\n def _abort_watchdog(self):\n if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():\n self.eot.set()\n\n def __del__(self):\n self._stop_subprocess()\n\n\ndef _subprocess_handle(eof, signum, frame):\n threading.Thread(target=eof.set()).start()\n\n\ndef _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiprocessing):\n \"\"\"\n Multithread or multiprocess generator worker process loop.\n \"\"\"\n if is_multiprocessing:\n signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))\n while True:\n # Fetch index, block\n try:\n idx = idx_queue.get(timeout=1)\n except KeyboardInterrupt:\n if is_multiprocessing:\n eof.set()\n idx_queue.cancel_join_thread()\n result_queue.cancel_join_thread()\n raise Exception(\"Generator worker receives KeyboardInterrupt.\")\n except queue.Empty:\n if eof.is_set():\n if is_multiprocessing:\n idx_queue.cancel_join_thread()\n result_queue.cancel_join_thread()\n return\n # If end-of-file (eof) is not set, continue to get data from idx_queue\n continue\n if idx is None:\n # When the queue is out of scope from master process, a None item can be fetched from the queue.\n # Upon receiving None, worker process should check if eof is set.\n if not eof.is_set():\n raise Exception(\"\")\n return\n if eof.is_set():\n if is_multiprocessing:\n idx_queue.cancel_join_thread()\n 
result_queue.cancel_join_thread()\n return\n # Fetch data, any exception from __getitem__ will terminate worker and timeout master process\n try:\n result = dataset[idx]\n except Exception:\n result = ExceptionHandler(where=\"in GeneratorDataset worker process\")\n # Send data, block\n while True:\n try:\n result_queue.put(result, timeout=5)\n except KeyboardInterrupt:\n if is_multiprocessing:\n eof.set()\n idx_queue.cancel_join_thread()\n result_queue.cancel_join_thread()\n raise Exception(\"Generator worker receives KeyboardInterrupt.\")\n except queue.Full:\n if eof.is_set():\n if is_multiprocessing:\n idx_queue.cancel_join_thread()\n result_queue.cancel_join_thread()\n return\n # If eof is not set, continue to put data to result_queue\n continue\n break\n del result, idx\n\n\nclass _GeneratorWorkerMt(threading.Thread):\n \"\"\"\n Worker process for multi-thread Generator.\n \"\"\"\n\n def __init__(self, dataset, eof):\n self.idx_queue = queue.Queue(16)\n self.res_queue = queue.Queue(16)\n super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False))\n\n def put(self, item):\n \"\"\"\n Put function for worker index queue. Never block. Raise queue.Full on failure.\n \"\"\"\n self.idx_queue.put_nowait(item)\n\n def get(self):\n \"\"\"\n Get function for worker result queue. Block with timeout.\n \"\"\"\n return self.res_queue.get(timeout=30)\n\n def queue_empty(self):\n if not self.idx_queue.empty():\n logger.warning(\"idx_queue is not empty\")\n return False\n if not self.res_queue.empty():\n logger.warning(\"res_queue is not empty\")\n return False\n return True\n\n\nclass _GeneratorWorkerMp(multiprocessing.Process):\n \"\"\"\n Worker process for multiprocess Generator.\n \"\"\"\n\n def __init__(self, dataset, eof, max_rowsize, queue_size):\n self.idx_queue = multiprocessing.Queue(queue_size)\n if get_enable_shared_mem():\n self.res_queue = _SharedQueue(queue_size, max_rowsize=max_rowsize)\n else:\n self.res_queue = multiprocessing.Queue(queue_size)\n self.idx_queue._joincancelled = True # pylint: disable=W0212\n self.res_queue._joincancelled = True # pylint: disable=W0212\n super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True))\n\n def put(self, item):\n \"\"\"\n Put function for worker index queue. Never block. Raise queue.Full on failure.\n \"\"\"\n self.idx_queue.put_nowait(item)\n\n def get(self):\n \"\"\"\n Get function for worker result queue. 
Block with timeout.\n \"\"\"\n # Relax 10s to 30s, since it sometimes will cause \"Generator worker process timeout\"\n # when we run too many iterators with infinite epoch(num_epoch=-1)\n return self.res_queue.get(timeout=30)\n\n def queue_empty(self):\n if not self.idx_queue.empty():\n logger.warning(\"idx_queue is not empty.\")\n return False\n if not self.res_queue.empty():\n logger.warning(\"res_queue is not empty.\")\n return False\n return True\n\n\nclass GeneratorDataset(MappableDataset):\n \"\"\"\n A source dataset that generates data from Python by invoking Python data source each epoch.\n\n The column names and column types of generated dataset depend on Python data defined by users.\n\n Args:\n source (Union[Callable, Iterable, Random Accessible]):\n A generator callable object, an iterable Python object or a random accessible Python object.\n Callable source is required to return a tuple of NumPy arrays as a row of the dataset on source().next().\n Iterable source is required to return a tuple of NumPy arrays as a row of the dataset on\n iter(source).next().\n Random accessible source is required to return a tuple of NumPy arrays as a row of the dataset on\n source[idx].\n column_names (Union[str, list[str]], optional): List of column names of the dataset (default=None). Users are\n required to provide either column_names or schema.\n column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None).\n If provided, sanity check will be performed on generator output.\n schema (Union[Schema, str], optional): Path to the JSON schema file or schema object (default=None). Users are\n required to provide either column_names or schema. If both are provided, schema will be used.\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.\n (default=None, expected order behavior shown in the table).\n sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible\n input is required (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n Random accessible input is required. When this argument is specified, `num_samples` reflects the maximum\n sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only\n when num_shards is also specified. Random accessible input is required.\n python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker process. This\n option could be beneficial if the Python operation is computational heavy (default=True).\n max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy\n data between processes. 
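# Illustrative sketch (not part of the original source): a stripped-down, thread-only
# version of the index-queue / result-queue pattern used by the generator workers above.
# A worker pulls sample indices from one queue, evaluates dataset[idx], and pushes the
# result to a second queue until an end-of-file event is set. All names are hypothetical.
import queue
import threading


def _toy_worker_loop(dataset, idx_queue, result_queue, eof):
    # Keep fetching indices until the master signals end of epoch.
    while not eof.is_set():
        try:
            idx = idx_queue.get(timeout=1)
        except queue.Empty:
            continue
        result_queue.put(dataset[idx])


dataset = [i * i for i in range(8)]           # any object supporting __getitem__
idx_q, res_q, eof = queue.Queue(), queue.Queue(), threading.Event()
worker = threading.Thread(target=_toy_worker_loop, args=(dataset, idx_q, res_q, eof), daemon=True)
worker.start()
for i in range(len(dataset)):                 # fill the index queue (round robin in the real code)
    idx_q.put(i)
rows = [res_q.get() for _ in range(len(dataset))]
eof.set()
print(rows)                                   # [0, 1, 4, 9, 16, 25, 36, 49]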
This is only used if python_multiprocessing is set to True (default 6 MB).\n\n Raises:\n RuntimeError: If source raises an exception during execution.\n RuntimeError: If len of column_names does not match output len of source.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> import numpy as np\n >>>\n >>> # 1) Multidimensional generator function as callable input.\n >>> def generator_multidimensional():\n ... for i in range(64):\n ... yield (np.array([[i, i + 1], [i + 2, i + 3]]),)\n >>>\n >>> dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=[\"multi_dimensional_data\"])\n >>>\n >>> # 2) Multi-column generator function as callable input.\n >>> def generator_multi_column():\n ... for i in range(64):\n ... yield np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])\n >>>\n >>> dataset = ds.GeneratorDataset(source=generator_multi_column, column_names=[\"col1\", \"col2\"])\n >>>\n >>> # 3) Iterable dataset as iterable input.\n >>> class MyIterable:\n ... def __init__(self):\n ... self._index = 0\n ... self._data = np.random.sample((5, 2))\n ... self._label = np.random.sample((5, 1))\n ...\n ... def __next__(self):\n ... if self._index >= len(self._data):\n ... raise StopIteration\n ... else:\n ... item = (self._data[self._index], self._label[self._index])\n ... self._index += 1\n ... return item\n ...\n ... def __iter__(self):\n ... self._index = 0\n ... return self\n ...\n ... def __len__(self):\n ... return len(self._data)\n >>>\n >>> dataset = ds.GeneratorDataset(source=MyIterable(), column_names=[\"data\", \"label\"])\n >>>\n >>> # 4) Random accessible dataset as random accessible input.\n >>> class MyAccessible:\n ... def __init__(self):\n ... self._data = np.random.sample((5, 2))\n ... self._label = np.random.sample((5, 1))\n ...\n ... def __getitem__(self, index):\n ... return self._data[index], self._label[index]\n ...\n ... def __len__(self):\n ... 
return len(self._data)\n >>>\n >>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=[\"data\", \"label\"])\n >>>\n >>> # list, dict, tuple of Python is also random accessible\n >>> dataset = ds.GeneratorDataset(source=[(np.array(0),), (np.array(1),), (np.array(2),)], column_names=[\"col\"])\n \"\"\"\n\n @check_generatordataset\n def __init__(self, source, column_names=None, column_types=None, schema=None, num_samples=None,\n num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,\n python_multiprocessing=True, max_rowsize=6):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id)\n self.source = source\n self.prepared_source = None # source to be sent to C++\n\n self.python_multiprocessing = python_multiprocessing\n\n self.column_names = to_list(column_names)\n\n if column_types is not None:\n self.column_types = mstypelist_to_detypelist(column_types)\n else:\n self.column_types = []\n\n self.schema = schema\n if schema is not None:\n self.schema = schema\n if not isinstance(schema, Schema):\n self.schema = Schema(schema)\n # Move get dataset_size by len from parse to here, because self.source will\n # lose attribution of '__len__' after deepcopy.\n self.source_len = -1 # unknown\n if hasattr(self.source, \"__len__\"):\n self.source_len = len(self.source)\n\n self.max_rowsize = max_rowsize\n self.sample_fn = None\n\n def __deepcopy__(self, memodict):\n if id(self) in memodict:\n return memodict[id(self)]\n new_op = self.__safe_deepcopy__(memodict, exclude=(\"source\", \"__transfer_dataset__\"))\n\n sample_fn = None\n if new_op.sampler is not None and hasattr(self.source, \"__getitem__\"):\n # The reason why there is a try catch here is because when the new op is being constructed with shared\n # memory enabled, there will be an exception thrown if there is not enough shared memory available\n if self.source_len == -1:\n raise RuntimeError(\"Attempt to construct a random access dataset, '__len__' method is required!\")\n try:\n if new_op.num_parallel_workers > 1:\n # if use num_parallel_workers is to large when python_multiprocessing=True which would cause OOM error\n # get the num_shards\n valid_num_shards = 1\n if isinstance(self.sampler, samplers.DistributedSampler):\n valid_num_shards = self.sampler.num_shards\n elif self.num_shards is not None:\n valid_num_shards = self.num_shards\n\n # get process memory usage\n process = psutil.Process(os.getpid())\n process_memory = process.memory_info().rss\n sys_memory = psutil.virtual_memory().total\n\n total_memory_maybe_used = process_memory * (new_op.num_parallel_workers + 1) * valid_num_shards\n if total_memory_maybe_used / sys_memory > 0.85:\n valid_num_worker = math.floor(sys_memory * 0.85 / valid_num_shards / process_memory - 1)\n valid_num_worker = 1 if valid_num_worker <= 0 else valid_num_worker\n if total_memory_maybe_used / sys_memory > 1.0:\n info = \"GeneratorDataset num_parallel_workers: \" + str(new_op.num_parallel_workers) + \\\n \" is too large which maybe cause a lot of memory occupation (>100%) during multi \" \\\n \"process running. Therefore, it is recommended to reduce num_parallel_workers to \" \\\n + str(valid_num_worker) + \" or smaller.\"\n raise RuntimeError(info)\n info = \"GeneratorDataset num_parallel_workers: \" + str(new_op.num_parallel_workers) + \\\n \" is too large which maybe cause a lot of memory occupation (>85%) during multi \" \\\n \"process running. 
Therefore, it is recommended to reduce num_parallel_workers to \" \\\n + str(valid_num_worker) + \" or smaller.\"\n logger.warning(info)\n\n sample_fn = SamplerFn(self.source, new_op.num_parallel_workers, self.python_multiprocessing,\n self.max_rowsize)\n new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn_mp(sample_ids, sample_fn))\n else:\n new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn(sample_ids, self.source))\n new_op.sample_fn = sample_fn\n except RuntimeError as e:\n raise Exception(str(e))\n else:\n try:\n new_op.sampler = None\n new_op.sample_fn = sample_fn\n new_op.source_len = min(new_op.source_len,\n new_op.num_samples) if new_op.num_samples != 0 else new_op.source_len\n iter(self.source)\n except TypeError:\n # Use generator function if input callable\n new_op.prepared_source = (lambda: _generator_fn(self.source, new_op.num_samples))\n else:\n # Use iterator function if input is iterable\n # Random accessible input is also iterable\n new_op.prepared_source = (lambda: _iter_fn(self.source, new_op.num_samples))\n\n return new_op\n\n def is_shuffled(self):\n return self.sampler.is_shuffled()\n\n def is_sharded(self):\n return self.sampler.is_sharded()\n\n def parse(self, children=None):\n if self.schema is None:\n return cde.GeneratorNode(self.prepared_source, self.column_names, self.column_types, self.source_len,\n self.sampler, self.num_parallel_workers)\n schema = self.schema\n if isinstance(schema, Schema):\n schema = self.schema.cpp_schema\n return cde.GeneratorNode(self.prepared_source, schema, self.source_len, self.sampler,\n self.num_parallel_workers)\n\n\nclass TFRecordDataset(SourceDataset):\n \"\"\"\n A source dataset for reading and parsing datasets stored on disk in TFData format.\n\n The columns of generated dataset depend on the source TFRecord files.\n\n Args:\n dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a\n pattern of files. The list will be sorted in a lexicographical order.\n schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).\n If the schema is not provided, the meta data from the TFData file is considered the schema.\n columns_list (list[str], optional): List of columns to be read (default=None, read all columns).\n num_samples (int, optional): The number of samples (rows) to be included in the dataset (default=None).\n If num_samples is None and numRows(parsed from schema) does not exist, read the full dataset;\n If num_samples is None and numRows(parsed from schema) is greater than 0, read numRows rows;\n If both num_samples and numRows(parsed from schema) are greater than 0, read num_samples rows.\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=Shuffle.GLOBAL).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are two levels of shuffling:\n\n - Shuffle.GLOBAL: Shuffle both the files and samples.\n\n - Shuffle.FILES: Shuffle files only.\n\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). 
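# Illustrative sketch (not part of the original source): a random-accessible NumPy source
# wrapped in the GeneratorDataset class above and followed by a batch step, to show how it
# is typically combined with downstream pipeline ops. All names are hypothetical.
import numpy as np
import mindspore.dataset as ds


class RandomAccessSource:
    """Random-accessible source: __getitem__ and __len__ are all GeneratorDataset needs."""

    def __init__(self, n=32):
        self._data = np.random.sample((n, 2)).astype(np.float32)
        self._label = np.random.sample((n, 1)).astype(np.float32)

    def __getitem__(self, index):
        return self._data[index], self._label[index]

    def __len__(self):
        return len(self._data)


dataset = ds.GeneratorDataset(source=RandomAccessSource(), column_names=["data", "label"],
                              shuffle=True, num_parallel_workers=1)
dataset = dataset.batch(8, drop_remainder=True)
print("batches per epoch:", dataset.get_dataset_size())  # expected: 4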
This\n argument can only be specified when num_shards is also specified.\n shard_equal_rows (bool, optional): Get equal rows for all shards(default=False). If shard_equal_rows\n is false, number of rows of each shard may be not equal, and may lead to a failure in distributed training.\n When the number of samples of per TFRecord file are not equal, it is suggested to set to true.\n This argument should only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Examples:\n >>> from mindspore import dtype as mstype\n >>>\n >>> tfrecord_dataset_dir = [\"/path/to/tfrecord_dataset_file\"] # contains 1 or multiple TFRecord files\n >>> tfrecord_schema_file = \"/path/to/tfrecord_schema_file\"\n >>>\n >>> # 1) Get all rows from tfrecord_dataset_dir with no explicit schema.\n >>> # The meta-data in the first row will be used as a schema.\n >>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir)\n >>>\n >>> # 2) Get all rows from tfrecord_dataset_dir with user-defined schema.\n >>> schema = ds.Schema()\n >>> schema.add_column(name='col_1d', de_type=mstype.int64, shape=[2])\n >>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=schema)\n >>>\n >>> # 3) Get all rows from tfrecord_dataset_dir with schema file.\n >>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=tfrecord_schema_file)\n \"\"\"\n\n @check_tfrecorddataset\n def __init__(self, dataset_files, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,\n shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_files = self._find_files(dataset_files)\n self.dataset_files.sort()\n\n self.schema = schema\n self.columns_list = replace_none(columns_list, [])\n self.shard_equal_rows = replace_none(shard_equal_rows, False)\n\n if self.schema is not None and (self.num_samples is None or self.num_samples == 0):\n self.num_samples = Schema.get_num_rows(self.schema)\n\n def parse(self, children=None):\n schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema\n return cde.TFRecordNode(self.dataset_files, schema, self.columns_list, self.num_samples, self.shuffle_flag,\n self.num_shards, self.shard_id, self.shard_equal_rows)\n\n\nclass ManifestDataset(MappableDataset):\n \"\"\"\n A source dataset for reading images from a Manifest file.\n\n The generated dataset has two columns: :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is of a scalar of uint64 type.\n\n Args:\n dataset_file (str): File to be read.\n usage (str, optional): Acceptable usages include `train`, `eval` and `inference` (default=`train`).\n num_samples (int, optional): The number of images to be included in the dataset.\n (default=None, will include all images).\n num_parallel_workers (int, optional): Number of workers to read the 
data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n class_indexing (dict, optional): A str-to-int mapping from label name to index\n (default=None, the folder names will be sorted alphabetically and each\n class will be given a unique index starting from 0).\n decode (bool, optional): decode the images after reading (default=False).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the max number of samples per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n RuntimeError: If class_indexing is not a dictionary.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> manifest_dataset_dir = \"/path/to/manifest_dataset_file\"\n >>>\n >>> # 1) Read all samples specified in manifest_dataset_dir dataset with 8 threads for training\n >>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, usage=\"train\", num_parallel_workers=8)\n >>>\n >>> # 2) Read samples (specified in manifest_file.manifest) for shard 0 in a 2-way distributed training setup\n >>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, num_shards=2, shard_id=0)\n \"\"\"\n\n @check_manifestdataset\n def __init__(self, dataset_file, usage=\"train\", num_samples=None, num_parallel_workers=None, shuffle=None,\n sampler=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_file = dataset_file\n self.decode = replace_none(decode, False)\n self.usage = replace_none(usage, \"train\")\n self.class_indexing = replace_none(class_indexing, {})\n\n def parse(self, children=None):\n return cde.ManifestNode(self.dataset_file, self.usage, self.sampler, self.class_indexing, self.decode)\n\n def get_class_indexing(self):\n \"\"\"\n Get the class index.\n\n Returns:\n dict, a str-to-int mapping from label name to index.\n\n Examples:\n >>> manifest_dataset_dir = \"/path/to/manifest_dataset_file\"\n >>>\n >>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir)\n >>> class_indexing = dataset.get_class_indexing()\n \"\"\"\n if self.class_indexing is None or not self.class_indexing:\n if self._class_indexing is None:\n runtime_getter = self._init_tree_getters()\n self._class_indexing = runtime_getter[0].GetClassIndexing()\n self.class_indexing = {}\n for pair in self._class_indexing:\n self.class_indexing[pair[0]] = pair[1][0]\n return self.class_indexing\n\n\nclass Cifar10Dataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing Cifar10 dataset.\n This api only supports parsing Cifar10 file in binary version now.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . 
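# A minimal sketch, assuming the public `mindspore.dataset` samplers; paths are
# placeholders. Per the sampler/shuffle table above, a mappable dataset takes
# either an explicit sampler or a shuffle flag, never both.
import mindspore.dataset as ds

cifar10_dir = "/path/to/cifar10_dataset_directory"

# Deterministic order through an explicit sampler (shuffle is left as None).
sequential = ds.Cifar10Dataset(dataset_dir=cifar10_dir,
                               sampler=ds.SequentialSampler(start_index=0, num_samples=64))

# Sharded, shuffled reads through DistributedSampler instead of num_shards/shard_id.
sharded = ds.Cifar10Dataset(dataset_dir=cifar10_dir,
                            sampler=ds.DistributedSampler(num_shards=2, shard_id=0, shuffle=True))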
`train` will read from 50,000\n train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples\n (default=None, all samples).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> cifar10_dataset_dir = \"/path/to/cifar10_dataset_directory\"\n >>>\n >>> # 1) Get all samples from CIFAR10 dataset in sequence\n >>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, shuffle=False)\n >>>\n >>> # 2) Randomly select 350 samples from CIFAR10 dataset\n >>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_samples=350, shuffle=True)\n >>>\n >>> # 3) Get samples from CIFAR10 dataset for shard 0 in a 2-way distributed training\n >>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_shards=2, shard_id=0)\n >>>\n >>> # In CIFAR10 dataset, each dictionary has keys \"image\" and \"label\"\n\n About CIFAR-10 dataset:\n\n The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,\n with 6000 images per class. There are 50000 training images and 10000 test images.\n The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.\n\n Here is the original CIFAR-10 dataset structure.\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n .. 
code-block::\n\n .\n └── cifar-10-batches-bin\n ├── data_batch_1.bin\n ├── data_batch_2.bin\n ├── data_batch_3.bin\n ├── data_batch_4.bin\n ├── data_batch_5.bin\n ├── test_batch.bin\n ├── readme.html\n └── batches.meta.txt\n\n Citation:\n\n .. code-block::\n\n @techreport{Krizhevsky09,\n author = {Alex Krizhevsky},\n title = {Learning multiple layers of features from tiny images},\n institution = {},\n year = {2009},\n howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}\n }\n \"\"\"\n\n @check_mnist_cifar_dataset\n def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,\n sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.Cifar10Node(self.dataset_dir, self.usage, self.sampler)\n\n\nclass Cifar100Dataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing Cifar100 dataset.\n\n The generated dataset has three columns :py:obj:`[image, coarse_label, fine_label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`coarse_label` and :py:obj:`fine_labels` are each a scalar of uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 50,000\n train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples\n (default=None, all samples).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, 'num_samples' reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and shuffle\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> cifar100_dataset_dir = \"/path/to/cifar100_dataset_directory\"\n >>>\n >>> # 1) Get all samples from CIFAR100 dataset in sequence\n >>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, shuffle=False)\n >>>\n >>> # 2) Randomly select 350 samples from CIFAR100 dataset\n >>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_samples=350, shuffle=True)\n >>>\n >>> # In CIFAR100 dataset, each dictionary has 3 keys: \"image\", \"fine_label\" and \"coarse_label\"\n\n About CIFAR-100 dataset:\n\n This dataset is just like the CIFAR-10, except it has 100 classes containing 600 images\n each. There are 500 training images and 100 testing images per class. The 100 classes in\n the CIFAR-100 are grouped into 20 superclasses. Each image comes with a \"fine\" label (the\n class to which it belongs) and a \"coarse\" label (the superclass to which it belongs).\n\n Here is the original CIFAR-100 dataset structure.\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── cifar-100-binary\n ├── train.bin\n ├── test.bin\n ├── fine_label_names.txt\n └── coarse_label_names.txt\n\n Citation:\n\n .. code-block::\n\n @techreport{Krizhevsky09,\n author = {Alex Krizhevsky},\n title = {Learning multiple layers of features from tiny images},\n institution = {},\n year = {2009},\n howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}\n }\n \"\"\"\n\n @check_mnist_cifar_dataset\n def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,\n sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.Cifar100Node(self.dataset_dir, self.usage, self.sampler)\n\n\nclass RandomDataset(SourceDataset):\n \"\"\"\n A source dataset that generates random data.\n\n Args:\n total_rows (int, optional): Number of samples for the dataset to generate\n (default=None, number of samples is random).\n schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).\n If the schema is not provided, the random dataset generates a random schema.\n columns_list (list[str], optional): List of columns to be read (default=None, read all columns)\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, all samples).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of 
shards that the dataset will be divided\n into (default=None). When this argument is specified, 'num_samples' reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n \"\"\"\n\n @check_random_dataset\n def __init__(self, total_rows=None, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,\n cache=None, shuffle=None, num_shards=None, shard_id=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.total_rows = total_rows\n if schema is not None:\n self.total_rows = replace_none(total_rows, Schema.get_num_rows(schema))\n self.schema = schema\n self.columns_list = replace_none(columns_list, [])\n\n def parse(self, children=None):\n schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema\n return cde.RandomNode(self.total_rows, schema, self.columns_list)\n\n\nclass Schema:\n \"\"\"\n Class to represent a schema of a dataset.\n\n Args:\n schema_file(str): Path of the schema file (default=None).\n\n Returns:\n Schema object, schema info about dataset.\n\n Raises:\n RuntimeError: If schema file failed to load.\n\n Examples:\n >>> from mindspore import dtype as mstype\n >>>\n >>> # Create schema; specify column name, mindspore.dtype and shape of the column\n >>> schema = ds.Schema()\n >>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])\n \"\"\"\n\n @check_schema\n def __init__(self, schema_file=None):\n self.schema_file = replace_none(schema_file, \"\")\n self.cpp_schema = cde.SchemaObj(self.schema_file)\n\n @check_add_column\n def add_column(self, name, de_type, shape=None):\n \"\"\"\n Add new column to the schema.\n\n Args:\n name (str): The new name of the column.\n de_type (str): Data type of the column.\n shape (list[int], optional): Shape of the column\n (default=None, [-1] which is an unknown shape of rank 1).\n\n Raises:\n ValueError: If column type is unknown.\n \"\"\"\n if isinstance(de_type, typing.Type):\n de_type = mstype_to_detype(de_type)\n col_type = str(de_type)\n else:\n col_type = str(cde.DataType(de_type))\n if shape is None:\n self.cpp_schema.add_column(name, col_type)\n else:\n self.cpp_schema.add_column(name, col_type, shape)\n\n def parse_columns(self, columns):\n \"\"\"\n Parse the columns and add it to self.\n\n Args:\n columns (Union[dict, list[dict], tuple[dict]]): Dataset attribute information, decoded from schema file.\n\n - list[dict], 'name' and 'type' must be in keys, 'shape' optional.\n\n - dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional.\n\n Raises:\n RuntimeError: If failed to parse columns.\n RuntimeError: If column's name field is missing.\n RuntimeError: If column's type field is missing.\n\n Examples:\n >>> schema = Schema()\n >>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},\n >>> {'name': 'label', 'type': 'int8', 'shape': [1]}]\n >>> schema.parse_columns(columns1)\n >>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}\n >>> schema.parse_columns(columns2)\n \"\"\"\n self.cpp_schema.parse_columns(json.dumps(columns, indent=2))\n\n def to_json(self):\n \"\"\"\n Get a JSON string of the schema.\n\n Returns:\n str, JSON string of the schema.\n \"\"\"\n return self.cpp_schema.to_json()\n\n def from_json(self, json_obj):\n 
\"\"\"\n Get schema file from JSON object.\n\n Args:\n json_obj(dictionary): Object of JSON parsed.\n\n Raises:\n RuntimeError: if there is unknown item in the object.\n RuntimeError: if dataset type is missing in the object.\n RuntimeError: if columns are missing in the object.\n \"\"\"\n self.cpp_schema.from_string(json.dumps(json_obj, indent=2))\n\n def __str__(self):\n return self.to_json()\n\n @staticmethod\n def get_num_rows(schema):\n schema_obj = schema\n if not isinstance(schema_obj, Schema):\n schema_obj = Schema(schema_obj)\n return schema_obj.cpp_schema.get_num_rows()\n\n\nclass USPSDataset(SourceDataset):\n \"\"\"\n A source dataset for reading and parsing the USPS dataset.\n\n The generated dataset has two columns: :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is of a scalar of uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str, optional): Usage of this dataset, can be \"train\", \"test\" or \"all\". \"train\" will read from 7,291\n train samples, \"test\" will read from 2,007 test samples, \"all\" will read from all 9,298 samples.\n (default=None, will read all samples)\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=Shuffle.GLOBAL).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are two levels of shuffling:\n\n - Shuffle.GLOBAL: Shuffle both the files and samples.\n\n - Shuffle.FILES: Shuffle files only.\n\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If usage is invalid.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Examples:\n >>> usps_dataset_dir = \"/path/to/usps_dataset_directory\"\n >>>\n >>> # Read 3 samples from USPS dataset\n >>> dataset = ds.USPSDataset(dataset_dir=usps_dataset_dir, num_samples=3)\n >>>\n >>> # Note: In USPS dataset, each dictionary has keys \"image\" and \"label\"\n\n About USPS dataset:\n\n USPS is a digit dataset automatically scanned from envelopes by the U.S. 
Postal Service\n containing a total of 9,298 16×16 pixel grayscale samples.\n The images are centered, normalized and show a broad range of font styles.\n\n Here is the original USPS dataset structure.\n You can download and unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n .\n └── usps_dataset_dir\n ├── usps\n ├── usps.t\n\n Citation:\n\n .. code-block::\n\n @article{hull1994database,\n title={A database for handwritten text recognition research},\n author={Hull, Jonathan J.},\n journal={IEEE Transactions on pattern analysis and machine intelligence},\n volume={16},\n number={5},\n pages={550--554},\n year={1994},\n publisher={IEEE}\n }\n \"\"\"\n\n @check_usps_dataset\n def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,\n num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.USPSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,\n self.shard_id)\n\n\nclass VOCDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing VOC dataset.\n\n The generated dataset with different task setting has different output columns:\n\n - task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \\\n :py:obj:`[label, dtype=uint32]`, :py:obj:`[difficult, dtype=uint32]`, :py:obj:`[truncate, dtype=uint32]`.\n - task = :py:obj:`Segmentation`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[target,dtype=uint8]`.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n task (str, optional): Set the task type of reading voc data, now only support `Segmentation` or `Detection`\n (default=`Segmentation`).\n usage (str, optional): Set the task type of ImageSets(default=`train`). If task is `Segmentation`, image and\n annotation list will be loaded in ./ImageSets/Segmentation/usage + \".txt\"; If task is `Detection`, image and\n annotation list will be loaded in ./ImageSets/Main/usage + \".txt\"; if task and usage are not set, image and\n annotation list will be loaded in ./ImageSets/Segmentation/train.txt as default.\n class_indexing (dict, optional): A str-to-int mapping from label name to index, only valid in\n `Detection` task (default=None, the folder names will be sorted alphabetically and each\n class will be given a unique index starting from 0).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n decode (bool, optional): Decode the images after reading (default=False).\n sampler (Sampler, optional): Object used to choose samples from the dataset\n (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). 
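# A minimal sketch, assuming the `Shuffle` enum exposed by `mindspore.dataset`;
# paths are placeholders. File-based sources such as USPSDataset accept a
# shuffle level instead of a sampler object.
import mindspore.dataset as ds

usps_dir = "/path/to/usps_dataset_directory"

# Shuffle both files and samples (the documented default).
global_shuffled = ds.USPSDataset(dataset_dir=usps_dir, usage="train", shuffle=ds.Shuffle.GLOBAL)

# Shuffle at file granularity only; rows inside each file keep their order.
file_shuffled = ds.USPSDataset(dataset_dir=usps_dir, usage="train", shuffle=ds.Shuffle.FILES)

# Disable shuffling entirely for reproducible reads.
ordered = ds.USPSDataset(dataset_dir=usps_dir, usage="train", shuffle=False)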
When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n extra_metadata(bool, optional): Flag to add extra meta-data to row. If True, an additional column named\n :py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If xml of Annotations is an invalid format.\n RuntimeError: If xml of Annotations loss attribution of `object`.\n RuntimeError: If xml of Annotations loss attribution of `bndbox`.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If task is not equal 'Segmentation' or 'Detection'.\n ValueError: If task equal 'Segmentation' but class_indexing is not None.\n ValueError: If txt related to mode is not exist.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op\n is added to remove the prefix('_meta-').\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> voc_dataset_dir = \"/path/to/voc_dataset_directory\"\n >>>\n >>> # 1) Read VOC data for segmentation training\n >>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task=\"Segmentation\", usage=\"train\")\n >>>\n >>> # 2) Read VOC data for detection training\n >>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task=\"Detection\", usage=\"train\")\n >>>\n >>> # 3) Read all VOC dataset samples in voc_dataset_dir with 8 threads in random order\n >>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task=\"Detection\", usage=\"train\",\n ... num_parallel_workers=8)\n >>>\n >>> # 4) Read then decode all VOC dataset samples in voc_dataset_dir in sequence\n >>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task=\"Detection\", usage=\"train\",\n ... 
decode=True, shuffle=False)\n >>>\n >>> # In VOC dataset, if task='Segmentation', each dictionary has keys \"image\" and \"target\"\n >>> # In VOC dataset, if task='Detection', each dictionary has keys \"image\" and \"annotation\"\n\n About VOC dataset.\n\n The PASCAL Visual Object Classes (VOC) challenge is a benchmark in visual\n object category recognition and detection, providing the vision and machine\n learning communities with a standard dataset of images and annotation, and\n standard evaluation procedures.\n\n You can unzip the original VOC-2012 dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── voc2012_dataset_dir\n ├── Annotations\n │ ├── 2007_000027.xml\n │ ├── 2007_000032.xml\n │ ├── ...\n ├── ImageSets\n │ ├── Action\n │ ├── Layout\n │ ├── Main\n │ └── Segmentation\n ├── JPEGImages\n │ ├── 2007_000027.jpg\n │ ├── 2007_000032.jpg\n │ ├── ...\n ├── SegmentationClass\n │ ├── 2007_000032.png\n │ ├── 2007_000033.png\n │ ├── ...\n └── SegmentationObject\n ├── 2007_000032.png\n ├── 2007_000033.png\n ├── ...\n\n Citation:\n\n .. code-block::\n\n @article{Everingham10,\n author = {Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.},\n title = {The Pascal Visual Object Classes (VOC) Challenge},\n journal = {International Journal of Computer Vision},\n volume = {88},\n year = {2012},\n number = {2},\n month = {jun},\n pages = {303--338},\n biburl = {http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.html#bibtex},\n howpublished = {http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html}\n }\n \"\"\"\n\n @check_vocdataset\n def __init__(self, dataset_dir, task=\"Segmentation\", usage=\"train\", class_indexing=None, num_samples=None,\n num_parallel_workers=None, shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None,\n cache=None, extra_metadata=False):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_dir = dataset_dir\n self.task = replace_none(task, \"Segmentation\")\n self.usage = replace_none(usage, \"train\")\n self.class_indexing = replace_none(class_indexing, {})\n self.decode = replace_none(decode, False)\n self.extra_metadata = extra_metadata\n\n def parse(self, children=None):\n return cde.VOCNode(self.dataset_dir, self.task, self.usage, self.class_indexing, self.decode, self.sampler,\n self.extra_metadata)\n\n def get_class_indexing(self):\n \"\"\"\n Get the class index.\n\n Returns:\n dict, a str-to-int mapping from label name to index.\n\n Examples:\n >>> voc_dataset_dir = \"/path/to/voc_dataset_directory\"\n >>>\n >>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir)\n >>> class_indexing = dataset.get_class_indexing()\n \"\"\"\n if self.task != \"Detection\":\n raise NotImplementedError(\"Only 'Detection' support get_class_indexing.\")\n if self.class_indexing is None or not self.class_indexing:\n if self._class_indexing is None:\n runtime_getter = self._init_tree_getters()\n self._class_indexing = runtime_getter[0].GetClassIndexing()\n self.class_indexing = {}\n for pair in self._class_indexing:\n self.class_indexing[pair[0]] = pair[1][0]\n return self.class_indexing\n\n\nclass CocoDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing COCO dataset.\n\n CocoDataset supports four kinds of tasks, which are Object Detection, Keypoint Detection, Stuff Segmentation and\n Panoptic Segmentation of 
2017 Train/Val/Test dataset.\n\n The generated dataset with different task setting has different output columns:\n\n - task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \\\n :py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`.\n - task = :py:obj:`Stuff`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[segmentation,dtype=float32]`, \\\n :py:obj:`[iscrowd,dtype=uint32]`.\n - task = :py:obj:`Keypoint`, output columns: :py:obj:`[image, dtype=uint8]`, \\\n :py:obj:`[keypoints, dtype=float32]`, :py:obj:`[num_keypoints, dtype=uint32]`.\n - task = :py:obj:`Panoptic`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \\\n :py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`, :py:obj:`[area, dtype=uint32]`.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n annotation_file (str): Path to the annotation JSON file.\n task (str, optional): Set the task type for reading COCO data. Supported task types:\n `Detection`, `Stuff`, `Panoptic` and `Keypoint` (default=`Detection`).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the configuration file).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n decode (bool, optional): Decode the images after reading (default=False).\n sampler (Sampler, optional): Object used to choose samples from the dataset\n (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n extra_metadata(bool, optional): Flag to add extra meta-data to row. If True, an additional column will be\n output at the end :py:obj:`[_meta-filename, dtype=string]` (default=False).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n RuntimeError: If parse JSON file failed.\n ValueError: If task is not in [`Detection`, `Stuff`, `Panoptic`, `Keypoint`].\n ValueError: If annotation_file is not exist.\n ValueError: If dataset_dir is not exist.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op is added\n to remove the prefix('_meta-').\n - CocoDataset doesn't support PKSampler.\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> coco_dataset_dir = \"/path/to/coco_dataset_directory/images\"\n >>> coco_annotation_file = \"/path/to/coco_dataset_directory/annotation_file\"\n >>>\n >>> # 1) Read COCO data for Detection task\n >>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,\n ... annotation_file=coco_annotation_file,\n ... task='Detection')\n >>>\n >>> # 2) Read COCO data for Stuff task\n >>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,\n ... annotation_file=coco_annotation_file,\n ... task='Stuff')\n >>>\n >>> # 3) Read COCO data for Panoptic task\n >>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,\n ... annotation_file=coco_annotation_file,\n ... task='Panoptic')\n >>>\n >>> # 4) Read COCO data for Keypoint task\n >>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,\n ... annotation_file=coco_annotation_file,\n ... task='Keypoint')\n >>>\n >>> # In COCO dataset, each dictionary has keys \"image\" and \"annotation\"\n\n About COCO dataset:\n\n COCO(Microsoft Common Objects in Context) is a large-scale object detection, segmentation, and captioning dataset\n with several features: Object segmentation, Recognition in context, Superpixel stuff segmentation,\n 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories,\n 5 captions per image, 250,000 people with keypoints. In contrast to the popular ImageNet dataset, COCO has fewer\n categories but more instances in per category.\n\n You can unzip the original COCO-2017 dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── coco_dataset_directory\n ├── train2017\n │ ├── 000000000009.jpg\n │ ├── 000000000025.jpg\n │ ├── ...\n ├── test2017\n │ ├── 000000000001.jpg\n │ ├── 000000058136.jpg\n │ ├── ...\n ├── val2017\n │ ├── 000000000139.jpg\n │ ├── 000000057027.jpg\n │ ├── ...\n └── annotations\n ├── captions_train2017.json\n ├── captions_val2017.json\n ├── instances_train2017.json\n ├── instances_val2017.json\n ├── person_keypoints_train2017.json\n └── person_keypoints_val2017.json\n\n Citation:\n\n .. code-block::\n\n @article{DBLP:journals/corr/LinMBHPRDZ14,\n author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and\n Lubomir D. Bourdev and Ross B. Girshick and James Hays and\n Pietro Perona and Deva Ramanan and Piotr Doll{\\'{a}}r and C. 
Lawrence Zitnick},\n title = {Microsoft {COCO:} Common Objects in Context},\n journal = {CoRR},\n volume = {abs/1405.0312},\n year = {2014},\n url = {http://arxiv.org/abs/1405.0312},\n archivePrefix = {arXiv},\n eprint = {1405.0312},\n timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},\n biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n }\n \"\"\"\n\n @check_cocodataset\n def __init__(self, dataset_dir, annotation_file, task=\"Detection\", num_samples=None, num_parallel_workers=None,\n shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None, cache=None,\n extra_metadata=False):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_dir = dataset_dir\n self.annotation_file = annotation_file\n self.task = replace_none(task, \"Detection\")\n self.decode = replace_none(decode, False)\n self.extra_metadata = extra_metadata\n\n def parse(self, children=None):\n return cde.CocoNode(self.dataset_dir, self.annotation_file, self.task, self.decode, self.sampler,\n self.extra_metadata)\n\n def get_class_indexing(self):\n \"\"\"\n Get the class index.\n\n Returns:\n dict, a str-to-list<int> mapping from label name to index.\n\n Examples:\n >>> coco_dataset_dir = \"/path/to/coco_dataset_directory/images\"\n >>> coco_annotation_file = \"/path/to/coco_dataset_directory/annotation_file\"\n >>>\n >>> # Read COCO data for Detection task\n >>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,\n ... annotation_file=coco_annotation_file,\n ... task='Detection')\n >>>\n >>> class_indexing = dataset.get_class_indexing()\n \"\"\"\n if self.task not in {\"Detection\", \"Panoptic\"}:\n raise NotImplementedError(\"Only 'Detection' and 'Panoptic' support get_class_indexing.\")\n if self._class_indexing is None:\n runtime_getter = self._init_tree_getters()\n self._class_indexing = dict(runtime_getter[0].GetClassIndexing())\n return self._class_indexing\n\n\nclass CelebADataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing CelebA dataset.\n Only support to read `list_attr_celeba.txt` currently, which is the attribute annotations of the dataset.\n\n The generated dataset has two columns: :py:obj:`[image, attr]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`attr` is of the uint32 type and one hot encoded.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n num_parallel_workers (int, optional): Number of workers to read the data (default=None, will use value set in\n the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None).\n usage (str, optional): Specify the `train`, `valid`, `test` part or `all` parts of dataset\n (default=`all`, will read all samples).\n sampler (Sampler, optional): Object used to choose samples from the dataset (default=None).\n decode (bool, optional): decode the images after reading (default=False).\n extensions (list[str], optional): List of file extensions to be included in the dataset (default=None).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will include all images).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). 
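# A minimal sketch, assuming the dataset.rename() operation available on
# MindSpore datasets; paths are placeholders. It shows the note above in
# practice: the "_meta-filename" column added by extra_metadata=True only
# surfaces once the "_meta-" prefix is stripped by an explicit rename.
import mindspore.dataset as ds

coco_dir = "/path/to/coco_dataset_directory/images"
coco_ann = "/path/to/coco_dataset_directory/annotation_file"

dataset = ds.CocoDataset(dataset_dir=coco_dir, annotation_file=coco_ann,
                         task="Detection", extra_metadata=True)
dataset = dataset.rename(input_columns=["_meta-filename"], output_columns=["filename"])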
When this argument is specified, `num_samples` reflects\n the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> celeba_dataset_dir = \"/path/to/celeba_dataset_directory\"\n >>>\n >>> # Read 5 samples from CelebA dataset\n >>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='train', num_samples=5)\n >>>\n >>> # Note: In celeba dataset, each data dictionary owns keys \"image\" and \"attr\"\n\n About CelebA dataset:\n\n CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset\n with more than 200K celebrity images, each with 40 attribute annotations.\n\n The images in this dataset cover large pose variations and background clutter.\n CelebA has large diversities, large quantities, and rich annotations, including\n\n * 10,177 number of identities,\n * 202,599 number of face images, and\n * 5 landmark locations, 40 binary attributes annotations per image.\n\n The dataset can be employed as the training and test sets for the following computer\n vision tasks: face attribute recognition, face detection, landmark (or facial part)\n localization, and face editing & synthesis.\n\n Original CelebA dataset structure:\n\n .. code-block::\n\n .\n └── CelebA\n ├── README.md\n ├── Img\n │ ├── img_celeba.7z\n │ ├── img_align_celeba_png.7z\n │ └── img_align_celeba.zip\n ├── Eval\n │ └── list_eval_partition.txt\n └── Anno\n ├── list_landmarks_celeba.txt\n ├── list_landmarks_align_celeba.txt\n ├── list_bbox_celeba.txt\n ├── list_attr_celeba.txt\n └── identity_CelebA.txt\n\n You can unzip the dataset files into the following structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── celeba_dataset_directory\n ├── list_attr_celeba.txt\n ├── 000001.jpg\n ├── 000002.jpg\n ├── 000003.jpg\n ├── ...\n\n Citation:\n\n .. 
code-block::\n\n @article{DBLP:journals/corr/LiuLWT14,\n author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},\n title = {Deep Learning Face Attributes in the Wild},\n journal = {CoRR},\n volume = {abs/1411.7766},\n year = {2014},\n url = {http://arxiv.org/abs/1411.7766},\n archivePrefix = {arXiv},\n eprint = {1411.7766},\n timestamp = {Tue, 10 Dec 2019 15:37:26 +0100},\n biburl = {https://dblp.org/rec/journals/corr/LiuLWT14.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org},\n howpublished = {http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html}\n }\n \"\"\"\n\n @check_celebadataset\n def __init__(self, dataset_dir, num_parallel_workers=None, shuffle=None, usage='all', sampler=None, decode=False,\n extensions=None, num_samples=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_dir = dataset_dir\n self.decode = replace_none(decode, False)\n self.extensions = replace_none(extensions, [])\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n if self.usage != \"all\":\n dataset_dir = os.path.realpath(self.dataset_dir)\n partition_file = os.path.join(dataset_dir, \"list_eval_partition.txt\")\n if os.path.exists(partition_file) is False:\n raise RuntimeError(\"Partition file can not be found when usage is not 'all'.\")\n return cde.CelebANode(self.dataset_dir, self.usage, self.sampler, self.decode, self.extensions)\n\n\nclass CLUEDataset(SourceDataset):\n \"\"\"\n A source dataset that reads and parses CLUE datasets.\n Supported CLUE classification tasks: `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.\n\n The generated dataset with different task setting has different output columns:\n\n - task = :py:obj:`AFQMC`\n - usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \\\n :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.\n - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \\\n :py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \\\n :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.\n\n - task = :py:obj:`TNEWS`\n - usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \\\n :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.\n - usage = :py:obj:`test`, output columns: :py:obj:`[label, dtype=string]`, \\\n :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \\\n :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.\n\n - task = :py:obj:`IFLYTEK`\n - usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \\\n :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.\n - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=string]`, \\\n :py:obj:`[sentence, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \\\n :py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.\n\n - task = :py:obj:`CMNLI`\n - usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \\\n 
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.\n - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \\\n :py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \\\n :py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.\n\n - task = :py:obj:`WSC`\n - usage = :py:obj:`train`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \\\n :py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \\\n :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \\\n :py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.\n - usage = output columns: :py:obj:`[span1_index, dtype=uint8]`, \\\n :py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \\\n :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, :py:obj:`[text, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \\\n :py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \\\n :py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \\\n :py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.\n\n - task = :py:obj:`CSL`\n - usage = :py:obj:`train`, output columns: :py:obj:`[id, dtype=uint8]`, \\\n :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.\n - usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \\\n :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`.\n - usage = :py:obj:`eval`, output columns: :py:obj:`[id, dtype=uint8]`, \\\n :py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.\n\n Args:\n dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for\n a pattern of files. The list will be sorted in a lexicographical order.\n task (str, optional): The kind of task, one of `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.\n (default=AFQMC).\n usage (str, optional): Specify the `train`, `test` or `eval` part of dataset (default=\"train\").\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, will include all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=Shuffle.GLOBAL).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are two levels of shuffling:\n\n - Shuffle.GLOBAL: Shuffle both the files and samples.\n\n - Shuffle.FILES: Shuffle files only.\n\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). 
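# A minimal sketch, assuming get_col_names() and create_dict_iterator() from the
# MindSpore dataset API; paths are placeholders. The column layout listed above
# depends on the task/usage pair, so it can be confirmed at runtime.
import mindspore.dataset as ds

clue_files = ["/path/to/clue_dataset_file"]
dataset = ds.CLUEDataset(dataset_files=clue_files, task="WSC", usage="train", shuffle=False)

print(dataset.get_col_names())  # e.g. span1_index, ..., text, label for WSC/train
for row in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row["text"], row["label"])
    break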
This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n\n Examples:\n >>> clue_dataset_dir = [\"/path/to/clue_dataset_file\"] # contains 1 or multiple clue files\n >>> dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task='AFQMC', usage='train')\n\n About CLUE dataset:\n\n CLUE, a Chinese Language Understanding Evaluation benchmark. It contains multiple\n tasks, including single-sentence classification, sentence pair classification, and machine\n reading comprehension.\n\n You can unzip the dataset files into the following structure and read by MindSpore's API,\n such as afqmc dataset:\n\n .. code-block::\n\n .\n └── afqmc_public\n ├── train.json\n ├── test.json\n └── dev.json\n\n Citation:\n\n .. code-block::\n\n @article{CLUEbenchmark,\n title = {CLUE: A Chinese Language Understanding Evaluation Benchmark},\n author = {Liang Xu, Xuanwei Zhang, Lu Li, Hai Hu, Chenjie Cao, Weitang Liu, Junyi Li, Yudong Li,\n Kai Sun, Yechen Xu, Yiming Cui, Cong Yu, Qianqian Dong, Yin Tian, Dian Yu, Bo Shi, Jun Zeng,\n Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou,\n Shaoweihua Liu, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Zhenzhong Lan},\n journal = {arXiv preprint arXiv:2004.05986},\n year = {2020},\n howpublished = {https://github.com/CLUEbenchmark/CLUE}\n }\n \"\"\"\n\n @check_cluedataset\n def __init__(self, dataset_files, task='AFQMC', usage='train', num_samples=None, num_parallel_workers=None,\n shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_files = self._find_files(dataset_files)\n self.usage = replace_none(usage, 'train')\n self.task = replace_none(task, 'AFQMC')\n\n def parse(self, children=None):\n return cde.CLUENode(self.dataset_files, self.task, self.usage, self.num_samples, self.shuffle_flag,\n self.num_shards, self.shard_id)\n\n\nclass CSVDataset(SourceDataset):\n \"\"\"\n A source dataset that reads and parses comma-separated values (CSV) datasets.\n The columns of generated dataset depend on the source CSV files.\n\n Args:\n dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search\n for a pattern of files. The list will be sorted in a lexicographical order.\n field_delim (str, optional): A string that indicates the char delimiter to separate fields (default=',').\n column_defaults (list, optional): List of default values for the CSV field (default=None). Each item\n in the list is either a valid type (float, int, or string). If this is not provided, treats all\n columns as string type.\n column_names (list[str], optional): List of column names of the dataset (default=None). 
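# A minimal sketch, assuming the CSVDataset arguments described above; the file
# path and column names are placeholders. field_delim controls how each record
# is split, and column_defaults sets a default value (and hence a type) for
# each column.
import mindspore.dataset as ds

csv_files = ["/path/to/csv_dataset_file"]
dataset = ds.CSVDataset(dataset_files=csv_files,
                        field_delim=",",
                        column_defaults=["", 0, 0.0],          # string, int and float columns
                        column_names=["name", "count", "score"],
                        shuffle=False)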
If this\n is not provided, infers the column_names from the first row of CSV file.\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, will include all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=Shuffle.GLOBAL).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are two levels of shuffling:\n\n - Shuffle.GLOBAL: Shuffle both the files and samples.\n\n - Shuffle.FILES: Shuffle files only.\n\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n\n Examples:\n >>> csv_dataset_dir = [\"/path/to/csv_dataset_file\"] # contains 1 or multiple csv files\n >>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, column_names=['col1', 'col2', 'col3', 'col4'])\n \"\"\"\n\n @check_csvdataset\n def __init__(self, dataset_files, field_delim=',', column_defaults=None, column_names=None, num_samples=None,\n num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_files = self._find_files(dataset_files)\n self.dataset_files.sort()\n self.field_delim = replace_none(field_delim, ',')\n self.column_defaults = replace_none(column_defaults, [])\n self.column_names = replace_none(column_names, [])\n\n def parse(self, children=None):\n return cde.CSVNode(self.dataset_files, self.field_delim, self.column_defaults, self.column_names,\n self.num_samples, self.shuffle_flag, self.num_shards, self.shard_id)\n\n\nclass SBUDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the SBU dataset.\n\n The generated dataset has two columns :py:obj:`[image, caption]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`caption` is of the string type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n decode (bool, optional): Decode the images after reading (default=False).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown 
in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter 'sampler'\n - Parameter 'shuffle'\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> sbu_dataset_dir = \"/path/to/sbu_dataset_directory\"\n >>> # Read 3 samples from SBU dataset\n >>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, num_samples=3)\n\n About SBU dataset:\n\n SBU dataset is a large captioned photo collection.\n It contains one million images with associated visually relevant captions.\n\n You should manually download the images using official download.m by replacing 'urls{i}(24, end)' with\n 'urls{i}(24:1:end)' and keep the directory as below.\n\n .. code-block::\n\n .\n └─ dataset_dir\n ├── SBU_captioned_photo_dataset_captions.txt\n ├── SBU_captioned_photo_dataset_urls.txt\n └── sbu_images\n ├── m_3326_3596303505_3ce4c20529.jpg\n ├── ......\n └── m_2522_4182181099_c3c23ab1cc.jpg\n\n Citation:\n\n .. code-block::\n\n @inproceedings{Ordonez:2011:im2text,\n Author = {Vicente Ordonez and Girish Kulkarni and Tamara L. 
Berg},\n Title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},\n Booktitle = {Neural Information Processing Systems ({NIPS})},\n Year = {2011},\n }\n \"\"\"\n\n @check_sbu_dataset\n def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, decode=False,\n sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.decode = replace_none(decode, False)\n\n def parse(self, children=None):\n return cde.SBUNode(self.dataset_dir, self.decode, self.sampler)\n\n\nclass _Flowers102Dataset:\n \"\"\"\n Mainly for loading Flowers102 Dataset, and return one row each time.\n \"\"\"\n def __init__(self, dataset_dir, task, usage, decode):\n self.dataset_dir = os.path.realpath(dataset_dir)\n self.task = task\n self.usage = usage\n self.decode = decode\n\n if self.task == \"Classification\":\n self.column_names = [\"image\", \"label\"]\n else:\n self.column_names = [\"image\", \"segmentation\", \"label\"]\n\n labels_path = os.path.join(self.dataset_dir, \"imagelabels.mat\")\n setid_path = os.path.join(self.dataset_dir, \"setid.mat\")\n # minus one to transform 1~102 to 0 ~ 101\n self.labels = (loadmat(labels_path)[\"labels\"][0] - 1).astype(np.uint32)\n self.setid = loadmat(setid_path)\n\n if self.usage == 'train':\n self.indices = self.setid[\"trnid\"][0].tolist()\n elif self.usage == 'test':\n self.indices = self.setid[\"tstid\"][0].tolist()\n elif self.usage == 'valid':\n self.indices = self.setid[\"valid\"][0].tolist()\n elif self.usage == 'all':\n self.indices = self.setid[\"trnid\"][0].tolist()\n self.indices += self.setid[\"tstid\"][0].tolist()\n self.indices += self.setid[\"valid\"][0].tolist()\n else:\n raise ValueError(\"Input usage is not within the valid set of ['train', 'valid', 'test', 'all'].\")\n\n def __getitem__(self, index):\n # range: 1 ~ 8189\n image_path = os.path.join(self.dataset_dir, \"jpg\", \"image_\" + str(self.indices[index]).zfill(5) + \".jpg\")\n if not os.path.exists(image_path):\n raise RuntimeError(\"Can not find image file: \" + image_path)\n\n if self.decode is True:\n image = np.asarray(Image.open(image_path).convert(\"RGB\"))\n else:\n image = np.fromfile(image_path, dtype=np.uint8)\n\n label = self.labels[self.indices[index] - 1]\n\n if self.task == \"Segmentation\":\n segmentation_path = \\\n os.path.join(self.dataset_dir, \"segmim\", \"segmim_\" + str(self.indices[index]).zfill(5) + \".jpg\")\n if not os.path.exists(segmentation_path):\n raise RuntimeError(\"Can not find segmentation file: \" + segmentation_path)\n if self.decode is True:\n segmentation = np.asarray(Image.open(segmentation_path).convert(\"RGB\"))\n else:\n segmentation = np.fromfile(segmentation_path, dtype=np.uint8)\n return image, segmentation, label\n\n return image, label\n\n def __len__(self):\n return len(self.indices)\n\n\nclass Flowers102Dataset(GeneratorDataset):\n \"\"\"\n A source dataset for reading and parsing Flowers102 dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]` or three :py:obj:`[image, segmentation, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`segmentation` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar or a tensor of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that 
contains the dataset.\n task (str): Specify the 'Classification' or 'Segmentation' task (default='Classification').\n usage (str): Specify the 'train', 'valid', 'test' part or 'all' parts of dataset\n (default='all', will read all samples).\n num_samples (int, optional): The number of samples to be included in the dataset (default=None, all images).\n num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.\n (default=None, expected order behavior shown in the table).\n decode (bool, optional): Whether or not to decode the images and segmentations after reading (default=False).\n sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible\n input is required (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n Random accessible input is required. When this argument is specified, 'num_samples' reflects the max\n sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only\n when num_shards is also specified. Random accessible input is required.\n\n Raises:\n RuntimeError: If dataset_dir does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter 'sampler'\n - Parameter 'shuffle'\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> flowers102_dataset_dir = \"/path/to/flowers102_dataset_directory\"\n >>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,\n ... task=\"Classification\",\n ... usage=\"all\",\n ... decode=True)\n\n About Flowers102 dataset:\n\n Flowers102 dataset consists of 102 flower categories.\n The flowers commonly occur in the United Kingdom.\n Each class consists of between 40 and 258 images.\n\n Here is the original Flowers102 dataset structure.\n You can unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n .\n └── flowes102_dataset_dir\n ├── imagelabels.mat\n ├── setid.mat\n ├── jpg\n ├── image_00001.jpg\n ├── image_00002.jpg\n ├── ...\n ├── segmim\n ├── segmim_00001.jpg\n ├── segmim_00002.jpg\n ├── ...\n\n Citation:\n\n .. 
code-block::\n\n @InProceedings{Nilsback08,\n author = \"Maria-Elena Nilsback and Andrew Zisserman\",\n title = \"Automated Flower Classification over a Large Number of Classes\",\n booktitle = \"Indian Conference on Computer Vision, Graphics and Image Processing\",\n month = \"Dec\",\n year = \"2008\",\n }\n \"\"\"\n\n @check_flowers102dataset\n def __init__(self, dataset_dir, task=\"Classification\", usage=\"all\", num_samples=None, num_parallel_workers=1,\n shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):\n self.dataset_dir = os.path.realpath(dataset_dir)\n self.task = replace_none(task, \"Classification\")\n self.usage = replace_none(usage, \"all\")\n self.decode = replace_none(decode, False)\n dataset = _Flowers102Dataset(self.dataset_dir, self.task, self.usage, self.decode)\n super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,\n num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,\n num_shards=num_shards, shard_id=shard_id)\n\n def get_class_indexing(self):\n \"\"\"\n Get the class index.\n\n Returns:\n dict, a str-to-int mapping from label name to index.\n \"\"\"\n class_names = [\n \"pink primrose\", \"hard-leaved pocket orchid\", \"canterbury bells\",\n \"sweet pea\", \"english marigold\", \"tiger lily\", \"moon orchid\",\n \"bird of paradise\", \"monkshood\", \"globe thistle\", \"snapdragon\",\n \"colt's foot\", \"king protea\", \"spear thistle\", \"yellow iris\",\n \"globe-flower\", \"purple coneflower\", \"peruvian lily\", \"balloon flower\",\n \"giant white arum lily\", \"fire lily\", \"pincushion flower\", \"fritillary\",\n \"red ginger\", \"grape hyacinth\", \"corn poppy\", \"prince of wales feathers\",\n \"stemless gentian\", \"artichoke\", \"sweet william\", \"carnation\",\n \"garden phlox\", \"love in the mist\", \"mexican aster\", \"alpine sea holly\",\n \"ruby-lipped cattleya\", \"cape flower\", \"great masterwort\", \"siam tulip\",\n \"lenten rose\", \"barbeton daisy\", \"daffodil\", \"sword lily\", \"poinsettia\",\n \"bolero deep blue\", \"wallflower\", \"marigold\", \"buttercup\", \"oxeye daisy\",\n \"common dandelion\", \"petunia\", \"wild pansy\", \"primula\", \"sunflower\",\n \"pelargonium\", \"bishop of llandaff\", \"gaura\", \"geranium\", \"orange dahlia\",\n \"pink-yellow dahlia?\", \"cautleya spicata\", \"japanese anemone\",\n \"black-eyed susan\", \"silverbush\", \"californian poppy\", \"osteospermum\",\n \"spring crocus\", \"bearded iris\", \"windflower\", \"tree poppy\", \"gazania\",\n \"azalea\", \"water lily\", \"rose\", \"thorn apple\", \"morning glory\",\n \"passion flower\", \"lotus\", \"toad lily\", \"anthurium\", \"frangipani\",\n \"clematis\", \"hibiscus\", \"columbine\", \"desert-rose\", \"tree mallow\",\n \"magnolia\", \"cyclamen\", \"watercress\", \"canna lily\", \"hippeastrum\",\n \"bee balm\", \"ball moss\", \"foxglove\", \"bougainvillea\", \"camellia\", \"mallow\",\n \"mexican petunia\", \"bromelia\", \"blanket flower\", \"trumpet creeper\",\n \"blackberry lily\"\n ]\n\n class_dict = {}\n for i, class_name in enumerate(class_names):\n class_dict[class_name] = i\n\n return class_dict\n\n\nclass TextFileDataset(SourceDataset):\n \"\"\"\n A source dataset that reads and parses datasets stored on disk in text format.\n The generated dataset has one column :py:obj:`[text]` with type string.\n\n Args:\n dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a\n pattern of files. 
The list will be sorted in a lexicographical order.\n num_samples (int, optional): The number of samples to be included in the dataset\n (default=None, will include all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch\n (default=Shuffle.GLOBAL).\n If shuffle is False, no shuffling will be performed;\n If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL\n Otherwise, there are two levels of shuffling:\n\n - Shuffle.GLOBAL: Shuffle both the files and samples.\n\n - Shuffle.FILES: Shuffle files only.\n\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the maximum sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_files are not valid or do not exist.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n\n Examples:\n >>> text_file_dataset_dir = [\"/path/to/text_file_dataset_file\"] # contains 1 or multiple text files\n >>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)\n \"\"\"\n\n @check_textfiledataset\n def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,\n num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,\n num_shards=num_shards, shard_id=shard_id, cache=cache)\n self.dataset_files = self._find_files(dataset_files)\n self.dataset_files.sort()\n\n def parse(self, children=None):\n return cde.TextFileNode(self.dataset_files, self.num_samples, self.shuffle_flag, self.num_shards,\n self.shard_id)\n\n\nclass _NumpySlicesDataset:\n \"\"\"\n Mainly for dealing with several kinds of formats of Python data, and return one row each time.\n \"\"\"\n\n def __init__(self, data, column_list=None):\n self.column_list = None\n # Convert dict data into tuple\n if isinstance(data, dict):\n data = self.process_dict(data)\n\n if isinstance(data, tuple):\n self.data = ()\n data_len = len(data)\n for i in range(data_len):\n self.data = self.data + (np.array(data[i]),)\n else:\n self.data = (np.array(data),)\n\n # check whether the data length in each column is equal\n data_len = [len(data_item) for data_item in self.data]\n if data_len[1:] != data_len[:-1]:\n raise ValueError(\"Data length in each column is not equal.\")\n\n # Init column_name\n if column_list is not None:\n self.column_list = column_list\n elif self.column_list is None:\n self.column_list = []\n column_num = len(self.data)\n for i in range(column_num):\n self.column_list.append(\"column_\" + str(i))\n\n def __getitem__(self, index):\n data_row = [d[index, ...] 
for d in self.data]\n data_res = tuple(data_row)\n return data_res\n\n def __len__(self):\n return len(self.data[0])\n\n def process_dict(self, input_data):\n \"\"\"\n Convert the dict like data into tuple format, when input is a tuple of dicts then compose it into a dict first.\n \"\"\"\n # Convert pandas like dict(has \"values\" column) into General dict\n data_keys = list(input_data.keys())\n data_col = input_data[data_keys[0]]\n if hasattr(data_col, \"values\"):\n new_dict = {}\n for key in data_keys:\n item1 = input_data.pop(key)\n new_dict[key] = item1.values\n input_data = new_dict\n\n # Convert the data in dict into tuple\n data = ()\n keys = list(input_data.keys())\n self.column_list = keys\n for key in keys:\n value = input_data[key]\n data = data + (list(value),)\n\n return data\n\n\nclass NumpySlicesDataset(GeneratorDataset):\n \"\"\"\n Creates a dataset with given data slices, mainly for loading Python data into dataset.\n\n The column names and column types of generated dataset depend on Python data defined by users.\n\n Args:\n data (Union[list, tuple, dict]) Input of given data. Supported data types include: list, tuple, dict and other\n NumPy formats. Input data will be sliced along the first dimension and generate additional rows, if input is\n list, there will be one column in each row, otherwise there tends to be multi columns. Large data is not\n recommended to be loaded in this way as data is loading into memory.\n column_names (list[str], optional): List of column names of the dataset (default=None). If column_names is not\n provided, the output column names will be named as the keys of dict when the input data is a dict,\n otherwise they will be named like column_0, column_1 ...\n num_samples (int, optional): The number of samples to be included in the dataset (default=None, all samples).\n num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.\n (default=None, expected order behavior shown in the table).\n sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible\n input is required (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n Random accessible input is required. When this argument is specified, `num_samples` reflects the max\n sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only\n when num_shards is also specified. Random accessible input is required.\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Raises:\n RuntimeError: If len of column_names does not match output len of data.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Examples:\n >>> # 1) Input data can be a list\n >>> data = [1, 2, 3]\n >>> dataset = ds.NumpySlicesDataset(data=data, column_names=[\"column_1\"])\n >>>\n >>> # 2) Input data can be a dictionary, and column_names will be its keys\n >>> data = {\"a\": [1, 2], \"b\": [3, 4]}\n >>> dataset = ds.NumpySlicesDataset(data=data)\n >>>\n >>> # 3) Input data can be a tuple of lists (or NumPy arrays), each tuple element refers to data in each column\n >>> data = ([1, 2], [3, 4], [5, 6])\n >>> dataset = ds.NumpySlicesDataset(data=data, column_names=[\"column_1\", \"column_2\", \"column_3\"])\n >>>\n >>> # 4) Load data from CSV file\n >>> import pandas as pd\n >>> df = pd.read_csv(filepath_or_buffer=csv_dataset_dir[0])\n >>> dataset = ds.NumpySlicesDataset(data=dict(df), shuffle=False)\n \"\"\"\n\n @check_numpyslicesdataset\n def __init__(self, data, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None,\n num_shards=None, shard_id=None):\n dataset = _NumpySlicesDataset(data, column_names)\n super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,\n num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,\n num_shards=num_shards, shard_id=shard_id)\n\n\nclass _PaddedDataset:\n \"\"\"\n Mainly for combining false samples provided by users into a dataset.\n\n Args:\n padded_samples (list(dict)): Data provided by user to be added to the initial Dataset.\n \"\"\"\n\n def __init__(self, padded_samples):\n self.column_names = list(padded_samples[0].keys())\n self.padded_samples = padded_samples\n\n def __getitem__(self, item):\n return (self.padded_samples[item][key] for key in self.column_names)\n\n def __len__(self):\n return len(self.padded_samples)\n\n\nclass PaddedDataset(GeneratorDataset):\n \"\"\"\n Creates a dataset with filler data provided by user. 
Mainly used to add to the original data set\n and assign it to the corresponding shard.\n\n Args:\n padded_samples (list(dict)): Samples provided by user.\n\n Raises:\n TypeError: If padded_samples is not an instance of list.\n TypeError: If the element of padded_samples is not an instance of dict.\n ValueError: If the padded_samples is empty.\n\n Examples:\n >>> import numpy as np\n >>> data = [{'image': np.zeros(1, np.uint8)}, {'image': np.zeros(2, np.uint8)}]\n >>> dataset = ds.PaddedDataset(padded_samples=data)\n \"\"\"\n\n @check_paddeddataset\n def __init__(self, padded_samples):\n dataset = _PaddedDataset(padded_samples)\n super().__init__(dataset, column_names=dataset.column_names, num_shards=None, shard_id=None, shuffle=False)\n self._dataset_size = len(dataset.padded_samples)\n self.padded_samples = padded_samples\n\n\nclass EMnistDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing the EMNIST dataset.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n name (str): Name of splits for this dataset, can be \"byclass\", \"bymerge\", \"balanced\", \"letters\", \"digits\"\n or \"mnist\".\n usage (str, optional): Usage of this dataset, can be \"train\", \"test\" or \"all\".\n (default=None, will read all samples).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> emnist_dataset_dir = \"/path/to/emnist_dataset_directory\"\n >>>\n >>> # Read 3 samples from EMNIST dataset\n >>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name=\"mnist\", num_samples=3)\n >>>\n >>> # Note: In emnist_dataset dataset, each dictionary has keys \"image\" and \"label\"\n\n About EMNIST dataset:\n\n The EMNIST dataset is a set of handwritten character digits derived from the NIST Special\n Database 19 and converted to a 28x28 pixel image format and dataset structure that directly\n matches the MNIST dataset. Further information on the dataset contents and conversion process\n can be found in the paper available at https://arxiv.org/abs/1702.05373v1.\n\n The numbers of characters and classes of each split of EMNIST are as follows:\n\n By Class: 814,255 characters and 62 unbalanced classes.\n By Merge: 814,255 characters and 47 unbalanced classes.\n Balanced: 131,600 characters and 47 balanced classes.\n Letters: 145,600 characters and 26 balanced classes.\n Digits: 280,000 characters and 10 balanced classes.\n MNIST: 70,000 characters and 10 balanced classes.\n\n Here is the original EMNIST dataset structure.\n You can unzip the dataset files into this directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── mnist_dataset_dir\n ├── emnist-mnist-train-images-idx3-ubyte\n ├── emnist-mnist-train-labels-idx1-ubyte\n ├── emnist-mnist-test-images-idx3-ubyte\n ├── emnist-mnist-test-labels-idx1-ubyte\n ├── ...\n\n Citation:\n\n .. 
code-block::\n\n @article{cohen_afshar_tapson_schaik_2017,\n title = {EMNIST: Extending MNIST to handwritten letters},\n DOI = {10.1109/ijcnn.2017.7966217},\n journal = {2017 International Joint Conference on Neural Networks (IJCNN)},\n author = {Cohen, Gregory and Afshar, Saeed and Tapson, Jonathan and Schaik, Andre Van},\n year = {2017},\n howpublished = {https://www.westernsydney.edu.au/icns/reproducible_research/\n publication_support_materials/emnist}\n }\n \"\"\"\n\n @check_emnist_dataset\n def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,\n shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.name = name\n self.usage = replace_none(usage, \"all\")\n\n def parse(self, children=None):\n return cde.EMnistNode(self.dataset_dir, self.name, self.usage, self.sampler)\n\n\nclass FakeImageDataset(MappableDataset):\n \"\"\"\n A source dataset for generating fake images.\n\n The generated dataset has two columns :py:obj:`[image, label]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`label` is a scalar of the uint32 type.\n\n Args:\n num_images (int, optional): Number of images to generate in the dataset (default=1000).\n image_size (tuple, optional): Size of the fake image (default=(224, 224, 3)).\n num_classes (int, optional): Number of classes in the dataset (default=10).\n base_seed (int, optional): Offsets the index-based random seed used to generate each image (default=0).\n num_samples (int, optional): The number of images to be included in the dataset\n (default=None, will read all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, will use value set in the config).\n shuffle (bool, optional): Whether or not to perform shuffle on the dataset\n (default=None, expected order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).\n When this argument is specified, `num_samples` reflects the max sample number of per shard.\n shard_id (int, optional): The shard ID within `num_shards` (default=None). This\n argument can only be specified when `num_shards` is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter 'sampler'\n - Parameter 'shuffle'\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> # Read 3 samples from FakeImage dataset\n >>> dataset = ds.FakeImageDataset(num_images=1000, image_size=(224,224,3),\n ... num_classes=10, base_seed=0, num_samples=3)\n >>>\n >>> # Note: In FakeImage dataset, each dictionary has keys \"image\" and \"label\"\n \"\"\"\n\n @check_fake_image_dataset\n def __init__(self, num_images=1000, image_size=(224, 224, 3), num_classes=10, base_seed=0, num_samples=None,\n num_parallel_workers=None, shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.num_images = num_images\n self.image_size = image_size\n self.num_classes = num_classes\n self.base_seed = base_seed\n\n def parse(self, children=None):\n return cde.FakeImageNode(self.num_images, self.image_size, self.num_classes, self.base_seed, self.sampler)\n\n\nclass FlickrDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing Flickr8k and Flickr30k dataset.\n\n The generated dataset has two columns :py:obj:`[image, annotation]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`annotation` is a tensor which contains 5 annotations string,\n such as [\"a\", \"b\", \"c\", \"d\", \"e\"].\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n annotation_file (str): Path to the root directory that contains the annotation.\n num_samples (int, optional): The number of images to be included in the dataset.\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n decode (bool, optional): Decode the images after reading (default=False).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the max sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). 
This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir is not valid or does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If dataset_dir is not exist.\n ValueError: If annotation_file is not exist.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> flickr_dataset_dir = \"/path/to/flickr_dataset_directory\"\n >>> annotation_file = \"/path/to/flickr_annotation_file\"\n >>>\n >>> # 1) Get all samples from FLICKR dataset in sequence\n >>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,\n ... annotation_file=annotation_file,\n ... shuffle=False)\n >>>\n >>> # 2) Randomly select 350 samples from FLICKR dataset\n >>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,\n ... annotation_file=annotation_file,\n ... num_samples=350,\n ... shuffle=True)\n >>>\n >>> # 3) Get samples from FLICKR dataset for shard 0 in a 2-way distributed training\n >>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,\n ... annotation_file=annotation_file,\n ... num_shards=2,\n ... shard_id=0)\n >>>\n >>> # In FLICKR dataset, each dictionary has keys \"image\" and \"annotation\"\n\n About Flickr8k dataset:\n\n The Flickr8k dataset consists of 8092 colour images. There are 40460 annotations in the Flickr8k.token.txt,\n each image has 5 annotations.\n\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n .. code-block::\n\n .\n └── Flickr8k\n ├── Flickr8k_Dataset\n │ ├── 1000268201_693b08cb0e.jpg\n │ ├── 1001773457_577c3a7d70.jpg\n │ ├── ...\n └── Flickr8k.token.txt\n\n Citation:\n\n .. code-block::\n\n @article{DBLP:journals/jair/HodoshYH13,\n author = {Micah Hodosh and Peter Young and Julia Hockenmaier},\n title = {Framing Image Description as a Ranking Task: Data, Models and Evaluation Metrics},\n journal = {J. Artif. Intell. Res.},\n volume = {47},\n pages = {853--899},\n year = {2013},\n url = {https://doi.org/10.1613/jair.3994},\n doi = {10.1613/jair.3994},\n timestamp = {Mon, 21 Jan 2019 15:01:17 +0100},\n biburl = {https://dblp.org/rec/journals/jair/HodoshYH13.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n }\n\n About Flickr30k dataset:\n\n The Flickr30k dataset consists of 31783 colour images. 
There are 158915 annotations in\n the results_20130124.token, each image has 5 annotations.\n\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n Citation:\n\n .. code-block::\n\n .\n └── Flickr30k\n ├── flickr30k-images\n │ ├── 1000092795.jpg\n │ ├── 10002456.jpg\n │ ├── ...\n └── results_20130124.token\n\n .. code-block::\n\n @article{DBLP:journals/tacl/YoungLHH14,\n author = {Peter Young and Alice Lai and Micah Hodosh and Julia Hockenmaier},\n title = {From image descriptions to visual denotations: New similarity metrics\n for semantic inference over event descriptions},\n journal = {Trans. Assoc. Comput. Linguistics},\n volume = {2},\n pages = {67--78},\n year = {2014},\n url = {https://tacl2013.cs.columbia.edu/ojs/index.php/tacl/article/view/229},\n timestamp = {Wed, 17 Feb 2021 21:55:25 +0100},\n biburl = {https://dblp.org/rec/journals/tacl/YoungLHH14.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n }\n \"\"\"\n\n @check_flickr_dataset\n def __init__(self, dataset_dir, annotation_file, num_samples=None, num_parallel_workers=None, shuffle=None,\n decode=None, sampler=None, num_shards=None, shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.annotation_file = annotation_file\n self.decode = replace_none(decode, False)\n\n def parse(self, children=None):\n return cde.FlickrNode(self.dataset_dir, self.annotation_file, self.decode, self.sampler)\n\n\nclass SBDataset(GeneratorDataset):\n \"\"\"\n A source dataset for reading and parsing Semantic Boundaries Dataset.\n\n The generated dataset has two columns: :py:obj:`[image, task]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`task` contains 20 images of the uint8 type if `task` is `Boundaries` otherwise\n contains 1 image of the uint8 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n task (str, optional): Acceptable tasks include `Boundaries` or `Segmentation` (default=`Boundaries`).\n usage (str, optional): Acceptable usages include `train`, `val`, `train_noval` and `all` (default=`all`).\n num_samples (int, optional): The number of images to be included in the dataset.\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the max sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). 
This\n argument can only be specified when num_shards is also specified.\n\n Raises:\n RuntimeError: If dataset_dir is not valid or does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If dataset_dir is not exist.\n ValueError: If task is not in [`Boundaries`, `Segmentation`].\n ValueError: If usage is not in [`train`, `val`, `train_noval`, `all`].\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> sb_dataset_dir = \"/path/to/sb_dataset_directory\"\n >>>\n >>> # 1) Get all samples from Semantic Boundaries Dataset in sequence\n >>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, shuffle=False)\n >>>\n >>> # 2) Randomly select 350 samples from Semantic Boundaries Dataset\n >>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_samples=350, shuffle=True)\n >>>\n >>> # 3) Get samples from Semantic Boundaries Dataset for shard 0 in a 2-way distributed training\n >>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_shards=2, shard_id=0)\n >>>\n >>> # In Semantic Boundaries Dataset, each dictionary has keys \"image\" and \"task\"\n\n About Semantic Boundaries Dataset:\n\n The Semantic Boundaries Dataset consists of 11355 colour images. There are 8498 images' name in the train.txt,\n 2857 images' name in the val.txt and 5623 images' name in the train_noval.txt. The category cls/\n contains the Segmentation and Boundaries results of category-level, the category inst/ catains the\n Segmentation and Boundaries results of instance-level.\n\n You can unzip the dataset files into the following structure and read by MindSpore's API:\n\n .. code-block::\n\n .\n └── benchmark_RELEASE\n ├── dataset\n ├── img\n │ ├── 2008_000002.jpg\n │ ├── 2008_000003.jpg\n │ ├── ...\n ├── cls\n │ ├── 2008_000002.mat\n │ ├── 2008_000003.mat\n │ ├── ...\n ├── inst\n │ ├── 2008_000002.mat\n │ ├── 2008_000003.mat\n │ ├── ...\n ├── train.txt\n └── val.txt\n\n .. 
code-block::\n\n @InProceedings{BharathICCV2011,\n author = \"Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and\n Subhransu Maji and Jitendra Malik\",\n title = \"Semantic Contours from Inverse Detectors\",\n booktitle = \"International Conference on Computer Vision (ICCV)\",\n year = \"2011\",\n \"\"\"\n\n @check_sb_dataset\n def __init__(self, dataset_dir, task='Boundaries', usage='all', num_samples=None, num_parallel_workers=1,\n shuffle=None, decode=None, sampler=None, num_shards=None, shard_id=None):\n dataset = _SBDataset(dataset_dir, task, usage, decode)\n super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,\n num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,\n num_shards=num_shards, shard_id=shard_id)\n\n\nclass _SBDataset:\n \"\"\"\n Dealing with the data file with .mat extension, and return one row in tuple (image, task) each time.\n \"\"\"\n\n def __init__(self, dataset_dir, task, usage, decode):\n self.column_list = ['image', 'task']\n self.task = task\n self.images_path = os.path.join(dataset_dir, 'img')\n self.cls_path = os.path.join(dataset_dir, 'cls')\n self._loadmat = loadmat\n self.categories = 20\n self.decode = replace_none(decode, False)\n\n if usage == \"all\":\n image_names = []\n for item in [\"train\", \"val\"]:\n usage_path = os.path.join(dataset_dir, item + '.txt')\n if not os.path.exists(usage_path):\n raise FileNotFoundError(\"SBDataset: {0} not found\".format(usage_path))\n with open(usage_path, 'r') as f:\n image_names += [x.strip() for x in f.readlines()]\n else:\n usage_path = os.path.join(dataset_dir, usage + '.txt')\n if not os.path.exists(usage_path):\n raise FileNotFoundError(\"SBDataset: {0} not found\".format(usage_path))\n with open(usage_path, 'r') as f:\n image_names = [x.strip() for x in f.readlines()]\n\n self.images = [os.path.join(self.images_path, i + \".jpg\") for i in image_names]\n self.clss = [os.path.join(self.cls_path, i + \".mat\") for i in image_names]\n\n if len(self.images) != len(self.clss):\n raise ValueError(\"SBDataset: images count not equal to cls count\")\n\n self._get_data = self._get_boundaries_data if self.task == \"Boundaries\" else self._get_segmentation_data\n self._get_item = self._get_decode_item if self.decode else self._get_undecode_item\n\n def _get_boundaries_data(self, mat_path):\n mat_data = self._loadmat(mat_path)\n return np.concatenate([np.expand_dims(mat_data['GTcls'][0][self.task][0][i][0].toarray(), axis=0)\n for i in range(self.categories)], axis=0)\n\n def _get_segmentation_data(self, mat_path):\n mat_data = self._loadmat(mat_path)\n return Image.fromarray(mat_data['GTcls'][0][self.task][0])\n\n def _get_decode_item(self, idx):\n return Image.open(self.images[idx]).convert('RGB'), self._get_data(self.clss[idx])\n\n def _get_undecode_item(self, idx):\n return np.fromfile(self.images[idx], dtype=np.uint8), self._get_data(self.clss[idx])\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n return self._get_item(idx)\n\n\nclass DeserializedDataset(Dataset):\n def __init__(self, input_obj):\n super().__init__()\n self.input_obj = input_obj\n\n def parse(self, children=None):\n if isinstance(self.input_obj, dict):\n json_str = json.dumps(self.input_obj)\n return cde.Dataset.from_json_string(json_str)\n return cde.Dataset.from_json_file(self.input_obj)\n\n\nclass CityscapesDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing Cityscapes dataset.\n\n The generated dataset has two columns 
:py:obj:`[image, task]`.\n The tensor of column :py:obj:`image` is of the uint8 type.\n The tensor of column :py:obj:`task` is of the uint8 type if task is not 'polygon' otherwise task is\n a string tensor with serialize json.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str): Acceptable usages include `train`, `test`, `val` or `all` if quality_mode is `fine`\n otherwise `train`, `train_extra`, `val` or `all` (default=`train`).\n quality_mode (str): Acceptable quality_modes include `fine` or `coarse` (default=`fine`).\n task (str): Acceptable tasks include `instance`, `semantic`, `polygon` or `color` (default=`instance`).\n num_samples (int, optional): The number of images to be included in the dataset.\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n decode (bool, optional): Decode the images after reading (default=False).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the max sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir is invalid or does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If dataset_dir is not exist.\n ValueError: If task is invalid.\n ValueError: If quality_mode is invalid.\n ValueError: If usage is invalid.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. 
list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> cityscapes_dataset_dir = \"/path/to/cityscapes_dataset_directory\"\n >>>\n >>> # 1) Get all samples from Cityscapes dataset in sequence\n >>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task=\"instance\", quality_mode=\"fine\",\n >>> usage=\"train\", shuffle=False, num_parallel_workers=1)\n >>>\n >>> # 2) Randomly select 350 samples from Cityscapes dataset\n >>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_samples=350, shuffle=True,\n >>> num_parallel_workers=1)\n >>>\n >>> # 3) Get samples from Cityscapes dataset for shard 0 in a 2-way distributed training\n >>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_shards=2, shard_id=0,\n >>> num_parallel_workers=1)\n >>>\n >>> # In Cityscapes dataset, each dictionary has keys \"image\" and \"task\"\n\n About Cityscapes dataset:\n\n The Cityscapes dataset consists of 5000 colour images with high quality dense pixel annotations and\n 19998 colour images with coarser polygonal annotations in 50 cities. There are 30 classes in this\n dataset and the polygonal annotations include dense semantic segmentation and instance segmentation\n for vehicle and people.\n\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n Taking the quality_mode of `fine` as an example.\n\n .. code-block::\n\n .\n └── Cityscapes\n ├── leftImg8bit\n | ├── train\n | | ├── aachen\n | | | ├── aachen_000000_000019_leftImg8bit.png\n | | | ├── aachen_000001_000019_leftImg8bit.png\n | | | ├── ...\n | | ├── bochum\n | | | ├── ...\n | | ├── ...\n | ├── test\n | | ├── ...\n | ├── val\n | | ├── ...\n └── gtFine\n ├── train\n | ├── aachen\n | | ├── aachen_000000_000019_gtFine_color.png\n | | ├── aachen_000000_000019_gtFine_instanceIds.png\n | | ├── aachen_000000_000019_gtFine_labelIds.png\n | | ├── aachen_000000_000019_gtFine_polygons.json\n | | ├── aachen_000001_000019_gtFine_color.png\n | | ├── aachen_000001_000019_gtFine_instanceIds.png\n | | ├── aachen_000001_000019_gtFine_labelIds.png\n | | ├── aachen_000001_000019_gtFine_polygons.json\n | | ├── ...\n | ├── bochum\n | | ├── ...\n | ├── ...\n ├── test\n | ├── ...\n └── val\n ├── ...\n\n Citation:\n\n .. code-block::\n\n @inproceedings{Cordts2016Cityscapes,\n title = {The Cityscapes Dataset for Semantic Urban Scene Understanding},\n author = {Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler,\n Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},\n booktitle = {Proc. 
of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},\n year = {2016}\n }\n \"\"\"\n\n @check_cityscapes_dataset\n def __init__(self, dataset_dir, usage=\"train\", quality_mode=\"fine\", task=\"instance\", num_samples=None,\n num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,\n shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.task = task\n self.quality_mode = quality_mode\n self.usage = usage\n self.decode = replace_none(decode, False)\n\n def parse(self, children=None):\n return cde.CityscapesNode(self.dataset_dir, self.usage, self.quality_mode, self.task, self.decode, self.sampler)\n\n\nclass DIV2KDataset(MappableDataset):\n \"\"\"\n A source dataset for reading and parsing DIV2KDataset dataset.\n\n The generated dataset has two columns :py:obj:`[hr_image, lr_image]`.\n The tensor of column :py:obj:`hr_image` is of the uint8 type.\n The tensor of column :py:obj:`lr_image` is of the uint8 type.\n\n Args:\n dataset_dir (str): Path to the root directory that contains the dataset.\n usage (str): Acceptable usages include `train`, `valid` or `all` (default=`train`).\n downgrade (str): Acceptable downgrades include `bicubic`, `unknown`, `mild`, `difficult` or\n `wild` (default=`bicubic`).\n scale (int): Acceptable scales include 2, 3, 4 or 8 (default=2).\n When `downgrade` is `bicubic`, scale can be 2, 3, 4, 8.\n When `downgrade` is `unknown`, scale can only be 2, 3, 4.\n When `downgrade` is `mild`, `difficult` or `wild`, scale can only be 4.\n num_samples (int, optional): The number of images to be included in the dataset.\n (default=None, all images).\n num_parallel_workers (int, optional): Number of workers to read the data\n (default=None, number set in the config).\n shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected\n order behavior shown in the table).\n decode (bool, optional): Decode the images after reading (default=False).\n sampler (Sampler, optional): Object used to choose samples from the\n dataset (default=None, expected order behavior shown in the table).\n num_shards (int, optional): Number of shards that the dataset will be divided\n into (default=None). When this argument is specified, `num_samples` reflects\n the max sample number of per shard.\n shard_id (int, optional): The shard ID within num_shards (default=None). 
This\n argument can only be specified when num_shards is also specified.\n cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.\n (default=None, which means no cache is used).\n\n Raises:\n RuntimeError: If dataset_dir is invalid or does not contain data files.\n RuntimeError: If num_parallel_workers exceeds the max thread numbers.\n RuntimeError: If sampler and shuffle are specified at the same time.\n RuntimeError: If sampler and sharding are specified at the same time.\n RuntimeError: If num_shards is specified but shard_id is None.\n RuntimeError: If shard_id is specified but num_shards is None.\n ValueError: If dataset_dir is not exist.\n ValueError: If usage is invalid.\n ValueError: If downgrade is invalid.\n ValueError: If scale is invalid.\n ValueError: If scale equal to 8 and downgrade not equal to `bicubic`.\n ValueError: If downgrade in [`mild`, `difficult`, `wild`] and scale not equal to 4.\n ValueError: If shard_id is invalid (< 0 or >= num_shards).\n\n Note:\n - This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.\n The table below shows what input arguments are allowed and their expected behavior.\n\n .. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`\n :widths: 25 25 50\n :header-rows: 1\n\n * - Parameter `sampler`\n - Parameter `shuffle`\n - Expected Order Behavior\n * - None\n - None\n - random order\n * - None\n - True\n - random order\n * - None\n - False\n - sequential order\n * - Sampler object\n - None\n - order defined by sampler\n * - Sampler object\n - True\n - not allowed\n * - Sampler object\n - False\n - not allowed\n\n Examples:\n >>> div2k_dataset_dir = \"/path/to/div2k_dataset_directory\"\n >>>\n >>> # 1) Get all samples from DIV2K dataset in sequence\n >>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage=\"train\", scale=2, downgrade=\"bicubic\",\n >>> shuffle=False)\n >>>\n >>> # 2) Randomly select 350 samples from DIV2K dataset\n >>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage=\"train\", scale=2, downgrade=\"bicubic\",\n >>> num_samples=350, shuffle=True)\n >>>\n >>> # 3) Get samples from DIV2K dataset for shard 0 in a 2-way distributed training\n >>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage=\"train\", scale=2, downgrade=\"bicubic\",\n >>> num_shards=2, shard_id=0)\n >>>\n >>> # In DIV2K dataset, each dictionary has keys \"hr_image\" and \"lr_image\"\n\n About DIV2K dataset:\n\n The DIV2K dataset consists of 1000 2K resolution images, among which 800 images are for training, 100 images\n are for validation and 100 images are for testing. NTIRE 2017 and NTIRE 2018 include only training dataset\n and validation dataset.\n\n You can unzip the dataset files into the following directory structure and read by MindSpore's API.\n\n Take the training set as an example.\n\n .. 
code-block::\n\n .\n └── DIV2K\n ├── DIV2K_train_HR\n | ├── 0001.png\n | ├── 0002.png\n | ├── ...\n ├── DIV2K_train_LR_bicubic\n | ├── X2\n | | ├── 0001x2.png\n | | ├── 0002x2.png\n | | ├── ...\n | ├── X3\n | | ├── 0001x3.png\n | | ├── 0002x3.png\n | | ├── ...\n | └── X4\n | ├── 0001x4.png\n | ├── 0002x4.png\n | ├── ...\n ├── DIV2K_train_LR_unknown\n | ├── X2\n | | ├── 0001x2.png\n | | ├── 0002x2.png\n | | ├── ...\n | ├── X3\n | | ├── 0001x3.png\n | | ├── 0002x3.png\n | | ├── ...\n | └── X4\n | ├── 0001x4.png\n | ├── 0002x4.png\n | ├── ...\n ├── DIV2K_train_LR_mild\n | ├── 0001x4m.png\n | ├── 0002x4m.png\n | ├── ...\n ├── DIV2K_train_LR_difficult\n | ├── 0001x4d.png\n | ├── 0002x4d.png\n | ├── ...\n ├── DIV2K_train_LR_wild\n | ├── 0001x4w.png\n | ├── 0002x4w.png\n | ├── ...\n └── DIV2K_train_LR_x8\n ├── 0001x8.png\n ├── 0002x8.png\n ├── ...\n Citation:\n\n .. code-block::\n\n @InProceedings{Agustsson_2017_CVPR_Workshops,\n author = {Agustsson, Eirikur and Timofte, Radu},\n title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},\n booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},\n url = \"http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf\",\n month = {July},\n year = {2017}\n }\n \"\"\"\n\n @check_div2k_dataset\n def __init__(self, dataset_dir, usage=\"train\", downgrade=\"bicubic\", scale=2, num_samples=None,\n num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,\n shard_id=None, cache=None):\n super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,\n shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)\n\n self.dataset_dir = dataset_dir\n self.usage = usage\n self.scale = scale\n self.downgrade = downgrade\n self.decode = replace_none(decode, False)\n\n def parse(self, children=None):\n return cde.DIV2KNode(self.dataset_dir, self.usage, self.downgrade, self.scale, self.decode, self.sampler)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_tensor_slice \"\"\"\nimport numpy as np\nimport pytest\n\nfrom mindspore import Tensor\nfrom mindspore import Parameter\nfrom mindspore import context\nfrom mindspore import dtype as mstype\nfrom mindspore.nn import Cell\nfrom mindspore.common.parameter import ParameterTuple\nfrom mindspore.ops import composite as C\n\n\ngrad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)\n\n\ndef setup_module():\n context.set_context(mode=context.PYNATIVE_MODE)\n\n\nclass NetWorkSlicePositive(Cell):\n def __init__(self):\n super(NetWorkSlicePositive, self).__init__()\n self.tensor_ret0 = Tensor(np.ones([1, 2, 3], np.int32))\n self.tensor_ret1 = Tensor(np.ones([4, 8, 10], np.int32))\n self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))\n self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))\n\n def 
construct(self, tensor):\n ret0 = tensor[3:4:1, 1:5:2, 3:6:1] + self.tensor_ret0\n ret1 = tensor[-6:4:1, 0:8:1, ::1] + self.tensor_ret1\n ret2 = tensor[::, ::, ::] + self.tensor_ret2\n ret3 = tensor[::2] + self.tensor_ret3\n return ret0, ret1, ret2, ret3\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_slice_positive():\n net = NetWorkSlicePositive()\n input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)\n input_0 = Tensor(input_np)\n output0, output1, output2, output3 = net(input_0)\n assert np.all(output0.asnumpy() == input_np[3:4:1, 1:5:2, 3:6:1] + np.ones([1, 2, 3]))\n assert np.all(output1.asnumpy() == input_np[-6:4:1, 0:8:1, ::1] + np.ones([4, 8, 10]))\n assert np.all(output2.asnumpy() == input_np[::, ::, ::] + np.ones([6, 8, 10]))\n assert np.all(output3.asnumpy() == input_np[::2] + np.ones([3, 8, 10]))\n\n\nclass NetWorkSliceEllipsis(Cell):\n def __init__(self):\n super(NetWorkSliceEllipsis, self).__init__()\n self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))\n self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))\n self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))\n\n def construct(self, tensor):\n ret0 = tensor[0:4:2, ..., 1] + self.tensor_ret0\n ret1 = tensor[...] + self.tensor_ret1\n ret2 = tensor[None] + self.tensor_ret2\n ret3 = tensor[True] + self.tensor_ret2\n return ret0, ret1, ret2, ret3\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_slice_ellipsis():\n net = NetWorkSliceEllipsis()\n input_np = np.arange(6*7*8*9).reshape(6, 7, 8, 9).astype(np.int32)\n input_0 = Tensor(input_np)\n output0, output1, output2, output3 = net(input_0)\n assert np.all(output0.asnumpy() == input_np[0:4:2, ..., 1] + np.ones([2, 7, 8]))\n assert np.all(output1.asnumpy() == input_np[...] 
+ np.ones([6, 7, 8, 9]))\n assert np.all(output2.asnumpy() == input_np[None] + np.ones([6, 7, 8, 9]))\n assert np.all(output3.asnumpy() == input_np[True] + np.ones([1, 6, 7, 8, 9]))\n\n\nclass NetWorkReduceDimension(Cell):\n def __init__(self):\n super(NetWorkReduceDimension, self).__init__()\n self.tensor_ret1 = Tensor(np.ones([3, 10], np.int32))\n self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))\n self.tensor_ret3 = Tensor(np.array(8, np.int32))\n self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))\n\n def construct(self, tensor):\n ret1 = tensor[::2, 1, ::1] + self.tensor_ret1\n ret2 = tensor[::, ::, 0] + self.tensor_ret2\n ret3 = tensor[3, 2, 5] + self.tensor_ret3\n ret4 = tensor[1] + self.tensor_ret4\n return ret1, ret2, ret3, ret4\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_reduce_dimension():\n net = NetWorkReduceDimension()\n input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)\n input_0 = Tensor(input_np)\n output1, output2, output3, output4 = net(input_0)\n assert np.all(output1.asnumpy() == input_np[::2, 1, ::1] + np.ones([3, 10]))\n assert np.all(output2.asnumpy() == input_np[::, ::, 0] + np.ones([6, 8]))\n assert np.all(output3.asnumpy() == input_np[3, 2, 5] + np.array(8, np.int32))\n assert np.all(output4.asnumpy() == input_np[1] + np.ones([8, 10]))\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\nclass NetWorkSliceStep(Cell):\n def __init__(self):\n super(NetWorkSliceStep, self).__init__()\n self.tensor_ret1 = Tensor(np.ones([6, 5, 10], np.int32))\n self.tensor_ret2 = Tensor(np.ones([3, 5, 5], np.int32))\n\n def construct(self, tensor):\n ret1 = tensor[::1, -5::, ::-1] + self.tensor_ret1\n ret2 = tensor[::2, -5::, ::2] + self.tensor_ret2\n return ret1, ret2\n\n\[email protected]\n# ascend op stridedslice has bug, and has not been fixed.\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_step_negative():\n net = NetWorkSliceStep()\n input_np = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)\n input_0 = Tensor(input_np)\n output1, output2 = net(input_0)\n assert np.all(output1.asnumpy() == input_np[::1, -5::, ::-1] + np.ones([6, 5, 10]))\n assert np.all(output2.asnumpy() == input_np[::2, -5::, ::2] + np.ones([3, 5, 5]))\n\n\nclass TensorGetItemByThreeTensors(Cell):\n def __init__(self):\n super(TensorGetItemByThreeTensors, self).__init__()\n self.const0 = Tensor(np.ones((4, 5, 8, 10)), mstype.int32)\n self.const1 = Tensor(np.ones((3, 4, 5, 10)), mstype.int32)\n self.const2 = Tensor(np.ones((5, 3, 4, 5)), mstype.int32)\n\n def construct(self, x, index_0, index_1, index_2):\n ret0 = x[index_0] + self.const0\n ret1 = x[index_0, index_1] + self.const1\n ret2 = x[index_0, index_1, index_2] + self.const2\n return ret0, ret1, ret2\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_getitem_by_tensors():\n \"\"\"This testcase may encounter a sync stream error occasionally\"\"\"\n net = TensorGetItemByThreeTensors()\n input_x = np.arange(6*8*10).reshape(6, 8, 10).astype(np.int32)\n index_0 = np.random.randint(6, size=(3, 4, 5)).astype(np.int32)\n index_1 = np.random.randint(6, size=(4, 5)).astype(np.int32)\n index_2 = np.random.randint(6, size=(5, 3, 4, 5)).astype(np.int32)\n input_x_ms = 
Tensor(input_x)\n index_0_ms = Tensor(index_0)\n index_1_ms = Tensor(index_1)\n input_2_ms = Tensor(index_2)\n output0, output1, output2 = net(input_x_ms, index_0_ms, index_1_ms, input_2_ms)\n assert np.all(output0.asnumpy() == input_x[index_0] + np.ones([4, 5, 8, 10]))\n assert np.all(output1.asnumpy() == input_x[index_0, index_1] + np.ones([3, 4, 5, 10]))\n assert np.all(output2.asnumpy() == input_x[index_0, index_1, index_2] + np.ones([5, 3, 4, 5]))\n\n\nclass TensorGetItemByMixedTensorsBasicCase(Cell):\n def __init__(self, c0, c1, c2, c3, c4, c5):\n super(TensorGetItemByMixedTensorsBasicCase, self).__init__()\n self.const0 = Tensor(c0)\n self.const1 = Tensor(c1)\n self.const2 = Tensor(c2)\n self.const3 = Tensor(c3)\n self.const4 = Tensor(c4)\n self.const5 = Tensor(c5)\n\n def construct(self, tensor, index_0, index_1):\n ret0 = tensor[index_0, index_1, 0:3] + self.const0\n ret1 = tensor[0:3, index_0, ...] + self.const1\n ret2 = tensor[0, index_0, index_1] + self.const2\n ret3 = tensor[..., index_0, 0:3] + self.const3\n ret4 = tensor[0:2, index_0, index_1] + self.const4\n ret5 = tensor[..., index_0, index_1] + self.const5\n return ret0, ret1, ret2, ret3, ret4, ret5\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_getitem_by_mixed_tensors():\n const0 = np.ones((3, 4, 5, 3), np.float32)\n const1 = np.ones((3, 3, 4, 5, 5), np.float32)\n const2 = np.ones((3, 4, 5), np.float32)\n const3 = np.ones((3, 3, 4, 5, 3), np.float32)\n const4 = np.ones((2, 3, 4, 5), np.float32)\n const5 = np.ones((3, 3, 4, 5), np.float32)\n net = TensorGetItemByMixedTensorsBasicCase(const0, const1, const2, const3, const4, const5)\n input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)\n input_ms = Tensor(input_np, mstype.float32)\n index_np_0 = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)\n index_np_1 = np.random.randint(4, size=(4, 5)).astype(np.int32)\n index_0 = Tensor(index_np_0, mstype.int32)\n index_1 = Tensor(index_np_1, mstype.int32)\n out0, out1, out2, out3, out4, out5 = net(input_ms, index_0, index_1)\n assert np.all(out0.asnumpy() == (input_np[index_np_0, index_np_1, 0:3] + const0))\n assert np.all(out1.asnumpy() == (input_np[0:3, index_np_0, ...] 
+ const1))\n assert np.all(out2.asnumpy() == (input_np[0, index_np_0, index_np_1] + const2))\n assert np.all(out3.asnumpy() == (input_np[..., index_np_0, 0:3] + const3))\n assert np.all(out4.asnumpy() == (input_np[0:2, index_np_0, index_np_1] + const4))\n assert np.all(out5.asnumpy() == (input_np[..., index_np_0, index_np_1] + const5))\n\n\nclass TensorItemByNone(Cell):\n def construct(self, tensor):\n ret = tensor.item()\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_item_by_none():\n net = TensorItemByNone()\n input_1d_np = np.ndarray([1]).astype(np.float32)\n input_1d_ms = Tensor(input_1d_np, mstype.float32)\n input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)\n input_3d_ms = Tensor(input_3d_np, mstype.float32)\n\n output_ms = net(input_1d_ms)\n assert np.all(output_ms.asnumpy() == input_1d_np.item())\n\n with pytest.raises(ValueError):\n net(input_3d_ms)\n\n\nclass TensorItemByItem(Cell):\n def construct(self, tensor, index):\n ret = tensor.item(index)\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_item_by_int():\n net = TensorItemByItem()\n input_1d_np = np.ndarray([1]).astype(np.float32)\n input_1d_ms = Tensor(input_1d_np, mstype.float32)\n\n input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)\n input_3d_ms = Tensor(input_3d_np, mstype.float32)\n\n index_np_1, index_np_2, index_np_3, index_np_4 = 0, 1.0, 30, 60\n\n output_1d_ms = net(input_1d_ms, index_np_1)\n output_3d_ms_1 = net(input_3d_ms, index_np_1)\n output_3d_ms_2 = net(input_3d_ms, index_np_3)\n\n assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))\n assert np.all(output_3d_ms_1.asnumpy() == input_3d_np.item(index_np_1))\n assert np.all(output_3d_ms_2.asnumpy() == input_3d_np.item(index_np_3))\n\n with pytest.raises(TypeError):\n net(input_1d_ms, index_np_2)\n\n with pytest.raises(IndexError):\n net(input_1d_ms, index_np_3)\n\n with pytest.raises(TypeError):\n net(input_3d_ms, index_np_2)\n\n with pytest.raises(IndexError):\n net(input_3d_ms, index_np_4)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_item_by_tuple():\n net = TensorItemByItem()\n input_1d_np = np.ndarray([1]).astype(np.float32)\n input_1d_ms = Tensor(input_1d_np, mstype.float32)\n input_3d_np = np.random.randint(3, size=(3, 4, 5)).astype(np.int32)\n input_3d_ms = Tensor(input_3d_np, mstype.float32)\n\n index_np_1 = (0,)\n index_np_2 = (1, 2)\n index_np_3 = (1, 2, 3)\n index_np_4 = (3, 4, 4)\n index_np_5 = (1, 2, 3, 4)\n\n output_1d_ms = net(input_1d_ms, index_np_1)\n output_3d_ms = net(input_3d_ms, index_np_3)\n assert np.all(output_1d_ms.asnumpy() == input_1d_np.item(index_np_1))\n assert np.all(output_3d_ms.asnumpy() == input_3d_np.item(index_np_3))\n\n with pytest.raises(ValueError):\n net(input_1d_ms, index_np_2)\n\n with pytest.raises(ValueError):\n net(input_3d_ms, index_np_2)\n\n with pytest.raises(IndexError):\n net(input_3d_ms, index_np_4)\n\n with pytest.raises(ValueError):\n net(input_3d_ms, index_np_5)\n\n\nclass TensorSetItemByMixedTensors_0(Cell):\n def __init__(self, value):\n super(TensorSetItemByMixedTensors_0, self).__init__()\n self.const = Tensor(np.ones((3, 4, 5), np.float32))\n self.param = 
Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)),\n mstype.float32),\n name=\"x\")\n self.value = value\n\n def construct(self, index_0, index_1, index_2):\n self.param[0:2, index_0, index_1] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_mixed_tensors_0():\n value = 88.0\n net = TensorSetItemByMixedTensors_0(value)\n index_0 = np.random.randint(3, size=(3, 4, 5))\n index_1 = np.random.randint(4, size=(4, 5))\n index_2 = np.random.randint(3, size=(2, 1, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)\n const = np.ones((3, 4, 5), np.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms)\n input_np[0:2, index_0, index_1] = value\n assert np.all(out.asnumpy() == (input_np + const))\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\nclass TensorSetItemByMixedTensors_1(Cell):\n def __init__(self, value):\n super(TensorSetItemByMixedTensors_1, self).__init__()\n self.const = Tensor(np.ones((3, 4, 5), np.float32))\n self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float32),\n name=\"x\")\n self.value = value\n\n def construct(self, index_0, index_1, index_2):\n self.param[0:2, index_0, ...] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_mixed_tensors_1():\n value = 88.0\n net = TensorSetItemByMixedTensors_1(value)\n index_0 = np.random.randint(3, size=(3, 4, 5))\n index_1 = np.random.randint(4, size=(4, 5))\n index_2 = np.random.randint(3, size=(2, 1, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)\n const = np.ones((3, 4, 5), np.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms)\n input_np[0:2, index_0, ...] 
= value\n assert np.all(out.asnumpy() == (input_np + const))\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\nclass TensorSetItemByMixedTensors_2(Cell):\n def __init__(self, value):\n super(TensorSetItemByMixedTensors_2, self).__init__()\n self.const = Tensor(np.ones((3, 4, 5), np.float16))\n self.param = Parameter(Tensor(np.arange(3 * 4 * 5).reshape((3, 4, 5)), mstype.float16),\n name=\"x\")\n self.value = value\n\n def construct(self, index_0, index_1, index_2):\n self.param[..., index_0, 1] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_mixed_tensors_2():\n value = 88.0\n net = TensorSetItemByMixedTensors_2(value)\n index_0 = np.random.randint(3, size=(3, 4, 5))\n index_1 = np.random.randint(4, size=(4, 5))\n index_2 = np.random.randint(3, size=(2, 1, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n input_np = np.arange(3 * 4 * 5).reshape((3, 4, 5)).astype(np.float32)\n const = np.ones((3, 4, 5), np.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms)\n input_np[..., index_0, 1] = value\n assert np.all(out.asnumpy() == (input_np + const))\n\n\nclass TensorGetItemByMixedTensorsIndexError(Cell):\n def construct(self, x, index_0, index_1):\n ret = x[index_0, index_1, 0:3, ..., 0:5, [1, 2, 3, 4]]\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_getitem_by_mixed_tensor_exception():\n input_ms = Tensor(np.arange(3 * 4 * 5 * 6 * 7 * 8 * 9).reshape((3, 4, 5, 6, 7, 8, 9)), mstype.int32)\n index_0 = Tensor(np.random.randint(3, size=(3, 4, 5)), mstype.int32)\n index_1 = Tensor(np.random.randint(4, size=(3, 4, 5)), mstype.int32)\n net1 = TensorGetItemByMixedTensorsIndexError()\n with pytest.raises(IndexError):\n net1(input_ms, index_0, index_1)\n\n\nclass TensorSetItemByOneTensorWithNumber(Cell):\n def __init__(self, value):\n super(TensorSetItemByOneTensorWithNumber, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n self.value = value\n\n def construct(self, index):\n self.param[index] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_one_tensor_with_number():\n value = 0.0\n net = TensorSetItemByOneTensorWithNumber(value)\n index_np = np.random.randint(4, size=(5, 4))\n index = Tensor(index_np, mstype.int32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))\n const = np.ones((6, 7, 8)).astype(np.float32)\n out = net(index)\n input_data[index_np] = value\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByOneTensorWithTensor(Cell):\n def __init__(self):\n super(TensorSetItemByOneTensorWithTensor, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index, value):\n self.param[index] = 
value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_one_tensor_with_tensor():\n net = TensorSetItemByOneTensorWithTensor()\n index_np = np.random.randint(4, size=(5, 4))\n index = Tensor(index_np, mstype.int32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8))\n const = np.ones((6, 7, 8)).astype(np.float32)\n value = np.zeros((4, 7, 8)).astype(np.float32)\n value_ms = Tensor(value, mstype.float32)\n out = net(index, value_ms)\n input_data[index_np] = value\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByOneTensorWithTupleOfNumber(Cell):\n def __init__(self, value):\n super(TensorSetItemByOneTensorWithTupleOfNumber, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n self.value = value\n\n def construct(self, index):\n self.param[index] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_one_tensor_with_tuple_number():\n value = (0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7)\n net = TensorSetItemByOneTensorWithTupleOfNumber(value)\n input_np = np.random.randint(5, size=(5, 4))\n input_ms = Tensor(input_np, mstype.int32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)\n const = np.ones((6, 7, 8)).astype(np.float32)\n out = net(input_ms)\n input_data[input_np] = value\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByOneTensorWithTupleOfTensor(Cell):\n def __init__(self):\n super(TensorSetItemByOneTensorWithTupleOfTensor, self).__init__()\n self.const = Tensor(np.ones((6, 3, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 3 * 8).reshape((6, 3, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index, value_0, value_1, value_2):\n self.param[index] = (value_0, value_1, value_2)\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_one_tensor_with_tuple_tensors():\n net = TensorSetItemByOneTensorWithTupleOfTensor()\n input_np = np.random.randint(6, size=(5, 4)).astype(np.int32)\n input_ms = Tensor(input_np, mstype.int32)\n input_data = np.arange(6 * 3 * 8).reshape((6, 3, 8)).astype(np.float32)\n value_0_np = np.zeros((8,), np.float32)\n value_1_np = np.ones((8,), np.float32)\n value_2_np = np.ones((8,), np.float32)*2\n value_0 = Tensor(value_0_np)\n value_1 = Tensor(value_1_np)\n value_2 = Tensor(value_2_np)\n const = np.ones((6, 3, 8)).astype(np.float32)\n out = net(input_ms, value_0, value_1, value_2)\n input_data[input_np] = (value_0_np, value_1_np, value_2_np)\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByTensorsWithNumber(Cell):\n def __init__(self, value):\n super(TensorSetItemByTensorsWithNumber, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n self.value = value\n\n def construct(self, index_0, index_1, index_2):\n self.param[index_0, index_1, index_2] = 
self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\[email protected]\ndef test_setitem_by_tensors_with_number():\n value = 0.0\n net = TensorSetItemByTensorsWithNumber(value)\n index_0 = np.random.randint(6, size=(3, 4, 5))\n index_1 = np.random.randint(7, size=(4, 5))\n index_2 = np.random.randint(8, size=(5, 3, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n out = net(index_0_ms, index_1_ms, index_2_ms)\n const = np.ones((6, 7, 8)).astype(np.float32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)\n input_data[index_0, index_1, index_2] = value\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByTensorsWithTensor(Cell):\n def __init__(self):\n super(TensorSetItemByTensorsWithTensor, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index_0, index_1, index_2, value):\n self.param[index_0, index_1, index_2] = value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_tensors_with_tensor():\n net = TensorSetItemByTensorsWithTensor()\n index_0 = np.random.randint(6, size=(3, 4, 5))\n index_1 = np.random.randint(7, size=(4, 5))\n index_2 = np.random.randint(8, size=(5, 3, 4, 5))\n value = np.zeros((4, 5)).astype(np.float32)\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n value_ms = Tensor(value, mstype.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms, value_ms)\n const = np.ones((6, 7, 8)).astype(np.float32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)\n input_data[index_0, index_1, index_2] = value\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByTensorsWithTensorNumberError(Cell):\n def __init__(self):\n super(TensorSetItemByTensorsWithTensorNumberError, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index_0, index_1, index_2, index_3, value):\n self.param[index_0, index_1, index_2, index_3] = value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_tensors_with_tensor_error():\n index_0 = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)\n index_1 = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)\n index_2 = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)\n index_3 = Tensor(np.random.randint(8, size=(1, 3, 4, 5)), mstype.int32)\n value = Tensor(np.zeros((2, 5)), mstype.float32)\n net = TensorSetItemByTensorsWithTensorNumberError()\n with pytest.raises(IndexError):\n net(index_0, index_1, index_2, index_3, value)\n\n\nclass TensorSetItemByTensorsWithTupleOfNumber(Cell):\n def __init__(self, value):\n super(TensorSetItemByTensorsWithTupleOfNumber, 
self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n self.value = value\n\n def construct(self, index_0, index_1, index_2):\n self.param[index_0, index_1, index_2] = self.value\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\n# GPU op has bug, and has not been fixed.\[email protected]_onecard\ndef test_setitem_by_tensors_with_tuple_of_number():\n value = (0.0, 1.1, 2.2, 3.3, 4.4)\n net = TensorSetItemByTensorsWithTupleOfNumber(value)\n index_0 = np.random.randint(6, size=(3, 4, 5))\n index_1 = np.random.randint(7, size=(4, 5))\n index_2 = np.random.randint(8, size=(5, 3, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)\n input_data[index_0, index_1, index_2] = value\n const = np.ones((6, 7, 8)).astype(np.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms)\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByTensorsWithTupleOfTensor(Cell):\n def __init__(self):\n super(TensorSetItemByTensorsWithTupleOfTensor, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index_0, index_1, index_2, value_0, value_1, value_2):\n self.param[index_0, index_1, index_2] = (value_0, value_1, value_2)\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\n# GPU op has bug, and has not been fixed.\[email protected]_onecard\ndef test_setitem_by_tensors_with_tuple_of_tensor():\n value_0 = np.zeros((4, 5))\n value_1 = np.ones((4, 5))\n value_2 = np.ones((4, 5)) * 2\n value_0_ms = Tensor(value_0, mstype.float32)\n value_1_ms = Tensor(value_1, mstype.float32)\n value_2_ms = Tensor(value_2, mstype.float32)\n net = TensorSetItemByTensorsWithTupleOfTensor()\n index_0 = np.random.randint(6, size=(3, 4, 5))\n index_1 = np.random.randint(7, size=(4, 5))\n index_2 = np.random.randint(8, size=(5, 3, 4, 5))\n index_0_ms = Tensor(index_0, mstype.int32)\n index_1_ms = Tensor(index_1, mstype.int32)\n index_2_ms = Tensor(index_2, mstype.int32)\n input_data = np.arange(6 * 7 * 8).reshape((6, 7, 8)).astype(np.float32)\n input_data[index_0, index_1, index_2] = (value_0, value_1, value_2)\n const = np.ones((6, 7, 8)).astype(np.float32)\n out = net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms, value_2_ms)\n assert np.all(out.asnumpy() == (input_data + const))\n\n\nclass TensorSetItemByTensorsWithTupleOfTensorNumberError(Cell):\n def __init__(self):\n super(TensorSetItemByTensorsWithTupleOfTensorNumberError, self).__init__()\n self.const = Tensor(np.ones((6, 7, 8)), mstype.float32)\n self.param = Parameter(Tensor(np.arange(6 * 7 * 8).reshape((6, 7, 8)), mstype.float32), name=\"x\")\n\n def construct(self, index_0, index_1, index_2, value_0, value_1):\n self.param[index_0, index_1, index_2] = (value_0, value_1)\n ret = self.param + self.const\n return ret\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_by_tensor_with_tuple_of_tensor_error():\n net 
= TensorSetItemByTensorsWithTupleOfTensorNumberError()\n index_0_ms = Tensor(np.random.randint(6, size=(3, 4, 5)), mstype.int32)\n index_1_ms = Tensor(np.random.randint(7, size=(4, 5)), mstype.int32)\n index_2_ms = Tensor(np.random.randint(8, size=(5, 3, 4, 5)), mstype.int32)\n value_0 = np.zeros((4, 5))\n value_1 = np.ones((4, 5))\n value_0_ms = Tensor(value_0, mstype.float32)\n value_1_ms = Tensor(value_1, mstype.float32)\n with pytest.raises(ValueError):\n net(index_0_ms, index_1_ms, index_2_ms, value_0_ms, value_1_ms)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_setitem_grad():\n class Net(Cell):\n def __init__(self):\n super(Net, self).__init__()\n self.weight = Parameter(\n Tensor(np.ones([4, 4, 5]), dtype=mstype.float32), \"b1\", requires_grad=True)\n\n def construct(self, a, b):\n a[1:3:1, ::] = b\n c = a + self.weight\n return c\n\n class GradNet(Cell):\n def __init__(self, net):\n super(GradNet, self).__init__()\n self.net = net\n self.weights = ParameterTuple(net.trainable_params())\n\n def construct(self, x, y, sens):\n return grad_by_list_with_sens(self.net, self.weights)(x, y, sens)\n net = GradNet(Net())\n x = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)\n y = Tensor(np.array([3]).astype(np.float32), mstype.float32)\n sens = Tensor(np.ones([4, 4, 5]).astype(np.float32), mstype.float32)\n net(x, y, sens)\n\n\nclass TensorAssignWithSliceError1(Cell):\n def construct(self, a, b):\n a[1:3:-1, ::] = b\n return a\n\n\nclass TensorAssignWithSliceError2(Cell):\n def construct(self, a, b):\n a[1:3:-1] = b\n return a\n\n\nclass TensorAssignWithSlice2(Cell):\n def construct(self, a, b, ck):\n a[1:5] = b\n a[3:4] = 5\n a[-1:1:-1] = b\n a[-1:3:-1] = 5\n a[::] = b\n a[::] = 9\n z = a + ck\n return z\n\n\nclass TensorAssignWithSlice(Cell):\n def __init__(self):\n super(TensorAssignWithSlice, self).__init__()\n self.c = 2.0\n\n def construct(self, a, b, ck):\n a[1:3, ::] = b\n a[2:3:, 3:] = b\n a[::] = b\n a[::] = self.c\n a[::, ::] = b\n a[::, ::] = self.c\n a[2:3:, 0:, 4:1:-1] = b\n a[2:3:, 0:, 4:1:-1] = self.c\n z = a + ck\n return z\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_slice_value_1():\n net = TensorAssignWithSlice()\n a = np.arange(60).reshape(3, 4, 5)\n b = np.array([1]).astype(np.float32) # Tensor([1], dtype=mstype.float32)\n ck = np.arange(60).reshape(3, 4, 5)\n ta = Tensor(a, dtype=mstype.float32)\n tb = Tensor(b, dtype=mstype.float32)\n tck = Tensor(ck, dtype=mstype.float32)\n out = net(ta, tb, tck)\n a[1:3, ::] = b\n a[2:3:, 3:] = b\n a[::] = b\n a[::] = 2.0\n a[::, ::] = b\n a[::, ::] = 2.0\n a[2:3:, 0:, 4:1:-1] = b\n a[2:3:, 0:, 4:1:-1] = 2.0\n z = a + ck\n assert np.all(z == out.asnumpy())\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_slice_value_2():\n net2 = TensorAssignWithSlice2()\n a = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n ck = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n b = np.array([1]).astype(np.float32) # Tensor([1], dtype=mstype.float32)\n tb = Tensor(b, dtype=mstype.float32)\n ta = Tensor(a, dtype=mstype.float32)\n tck = Tensor(ck, dtype=mstype.float32)\n out = net2(ta, tb, tck)\n a[1:5] = b\n a[3:4] = 5\n a[-1:1:-1] = b\n a[-1:3:-1] = 5\n a[::] = b\n a[::] 
= 9\n z = a + ck\n assert np.all(z == out.asnumpy())\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_exception():\n net = TensorAssignWithSlice()\n net2 = TensorAssignWithSlice2()\n # The test case is no longer appropriate since x[1:3:-1] = np.array(2) does\n # not incur an error in numpy, which leaves the original array unchanged after\n # the assign operation.\n # net_e1 = TensorAssignWithSliceError1()\n # net_e2 = TensorAssignWithSliceError2()\n a = np.arange(60).reshape(3, 4, 5)\n ck = np.arange(60).reshape(3, 4, 5)\n b = Tensor([1], dtype=mstype.float32)\n Ta = Tensor(a, dtype=mstype.float32)\n Tck = Tensor(ck, dtype=mstype.float32)\n Ta4d = Tensor(a.reshape(1, 3, 4, 5), dtype=mstype.float32)\n Ta4d_ck = Tensor(ck.reshape(1, 3, 4, 5), dtype=mstype.float32)\n Tb = Tensor([1, 3], dtype=mstype.float32)\n Tc = Tensor([], dtype=mstype.float32)\n t = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)\n tck = Tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=mstype.float32)\n # Error for A[Slice] = Number\n # 1. A[Slice] = Number, Slice error\n # with pytest.raises(ValueError):\n # net_e2(t, 2)\n\n # Error for A[Slice] = U, U is a Tensor\n # 1. A[Slice] = U, u.size is error\n with pytest.raises(ValueError):\n net2(t, Tb, tck)\n # 2. A[Slice] = U, U is empty\n with pytest.raises(ValueError):\n net2(t, Tc, tck)\n # 3. A[Slice] = U, U.size error\n with pytest.raises(ValueError):\n net2(t, Tb, tck)\n\n # Error for A[Tuple(Slice...)] = Tensor\n # 1. A[Tuple(Slice...)] = U, U is empty\n with pytest.raises(ValueError):\n net(Ta, Tc, Tck)\n # 2. A[Tuple(Slice...)] = U, U.size error\n with pytest.raises(ValueError):\n net(Ta, Tb, Tck)\n # 3. A[Tuple(Slice...)] = U, Slice error\n # with pytest.raises(IndexError):\n # net_e1(Ta, b)\n\n # Error for A[Tuple(Slice...)] = Number\n # 1. A[Tuple(Slice...)] = Number, Slice error\n # with pytest.raises(IndexError):\n # net_e1(Ta, 2)\n\n net = TensorAssignWithInteger()\n # Error for A[Number] = scalar/Tensor\n # 1. A[Number] = U, U is a Tensor, u.size not match\n with pytest.raises(ValueError):\n net(Ta, Tb, Tck)\n with pytest.raises(ValueError):\n net(Ta, Tc, Tck)\n # 2. A[Number] = U, the number index error\n with pytest.raises(IndexError):\n net(Ta4d, b, Ta4d_ck)\n\n # Error for A[(n,m)] = scalar/Tensor\n # 1. A[(n,m)] = U, U is a tensor. u.size not match\n net = TensorAssignWithTupleInteger()\n with pytest.raises(ValueError):\n net(Ta, Tc, Tck)\n with pytest.raises(ValueError):\n net(Ta, Tb, Tck)\n # 2. A[(n,m)] = U, the number index error\n with pytest.raises(IndexError):\n net(Ta4d, b, Ta4d_ck)\n\n # Error for A[...] = U or A[1:, ...] = u\n # 1. A[...] = scalar/tensor\n net = TensorAssignWithEllipsis()\n net(Ta, Ta4d)\n with pytest.raises(ValueError):\n net(Ta, Tc)\n with pytest.raises(ValueError):\n net(Ta, Tb)\n # 2. A[::, 1:, ...] = scalar/tensor\n net = TensorAssignWithTupleEllipsis()\n net(Ta, b)\n with pytest.raises(ValueError):\n net(Ta, Tb)\n\n\nclass TensorAssignWithTupleEllipsis2(Cell):\n def construct(self, a, b):\n a[1:, ..., ::] = b\n return a\n\n\nclass TensorAssignWithTupleEllipsis(Cell):\n def construct(self, a, b):\n a[:2, ...] = 1.0\n a[1:, ...] = b\n return a\n\n\nclass TensorAssignWithEllipsis(Cell):\n def construct(self, a, b):\n a[...] = 1\n a[...] 
= b\n return a\n\n\nclass TensorAssignWithInteger(Cell):\n def construct(self, a, b, ck):\n a[1] = 1\n a[0] = b\n z = a + ck\n return z\n\n\nclass TensorAssignWithTupleInteger(Cell):\n def construct(self, a, b, ck):\n a[(1)] = 1\n a[(1)] = b\n a[(1, 1)] = b\n a[(1, 1)] = 1\n z = a + ck\n return z\n\n\nclass TensorAssignWithBoolTensorIndex(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndex, self).__init__()\n self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)\n self.u_scalar = 5\n\n def construct(self, a, b, c, u_tensor):\n a[c] = self.u_scalar\n a[b] = u_tensor\n z = a + self.t\n return z\n\n\nclass TensorAssignWithBoolTensorIndexError(Cell):\n def construct(self, a, b, c, u_tensor):\n a[b][c] = u_tensor\n return a\n\n\nclass TensorAssignWithBoolTensorIndex2(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndex2, self).__init__()\n self.t = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)\n self.u_scalar = 5\n\n def construct(self, a, u_tensor):\n a[a > 8] = u_tensor\n a[a >= 6] = self.u_scalar\n a[a < 3] = self.u_scalar\n a[a <= 5] = u_tensor\n a[a == 5] = self.u_scalar\n z = a + self.t\n return z\n\n\nclass TensorAssignWithBoolTensorIndex2Error(Cell):\n def construct(self, a, u_tensor):\n a[a > 8][a > 5] = u_tensor\n return a\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_bool_index_0():\n a = np.arange(60).reshape(3, 4, 5)\n b = a > 5\n c = a < 3\n Ta = Tensor(a, dtype=mstype.float32)\n Tb = Tensor(b)\n Tc = Tensor(c)\n u_tensor = Tensor([1], dtype=mstype.float32)\n net1 = TensorAssignWithBoolTensorIndex()\n out = net1(Ta, Tb, Tc, u_tensor)\n res = np.arange(60).reshape(3, 4, 5)\n res[c] = 5\n res[b] = 1\n res = res + np.ones([3, 4, 5])\n assert np.all(out.asnumpy() == res)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_bool_index_1():\n a = np.arange(60).reshape(3, 4, 5)\n Ta = Tensor(a, dtype=mstype.float32)\n u_tensor = Tensor([1], dtype=mstype.float32)\n net2 = TensorAssignWithBoolTensorIndex2()\n out = net2(Ta, u_tensor)\n res = np.arange(60).reshape(3, 4, 5)\n res[res > 8] = 1\n res[res >= 6] = 5\n res[res < 3] = 5\n res[res <= 5] = 1\n res[res == 5] = 5\n res = res + np.ones([3, 4, 5])\n assert np.all(out.asnumpy() == res)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_assign_bool_index_exception():\n a = np.arange(60).reshape(3, 4, 5)\n b = a > 5\n c = a < 3\n Ta = Tensor(a, dtype=mstype.float32)\n Tb = Tensor(b)\n Tc = Tensor(c)\n Td = Tensor([True, True])\n u_tensor = Tensor([1], dtype=mstype.float32)\n u_tensor_error = Tensor([1, 2], dtype=mstype.float32)\n u_scalar = 5\n net1 = TensorAssignWithBoolTensorIndex()\n net2 = TensorAssignWithBoolTensorIndex2()\n with pytest.raises(ValueError):\n net1(Ta, Td, Tc, u_tensor)\n with pytest.raises(IndexError):\n net1(Ta, u_tensor, Tc, u_tensor)\n with pytest.raises(ValueError):\n net1(Ta, Tb, Td, u_tensor)\n with pytest.raises(IndexError):\n net1(Ta, Tb, Ta, u_tensor)\n with pytest.raises(ValueError):\n net1(Ta, Tb, Tc, u_tensor_error)\n # net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)\n with pytest.raises(ValueError):\n net2(Ta, u_tensor_error)\n net3 = TensorAssignWithBoolTensorIndexError()\n with 
pytest.raises(IndexError):\n net3(Ta, Tb, Tc, u_tensor)\n with pytest.raises(IndexError):\n net3(Ta, Tb, Tc, u_scalar)\n net4 = TensorAssignWithBoolTensorIndex2Error()\n with pytest.raises(IndexError):\n net4(Ta, u_tensor)\n with pytest.raises(IndexError):\n net4(Ta, u_scalar)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_slice_reduce_out_of_bounds_neg():\n class NetWork(Cell):\n def __init__(self):\n super(NetWork, self).__init__()\n self.tensor_ret = Tensor(np.array(9, np.int32))\n\n def construct(self, tensor):\n ret = tensor[-7, 3, 4]\n return ret\n\n input_tensor = Tensor(np.ones([6, 8, 10], np.int32))\n net = NetWork()\n with pytest.raises(IndexError) as ex:\n net(input_tensor)\n assert \"'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, \" \\\n \"but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': -7.\" in str(ex.value)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_slice_reduce_out_of_bounds_positive():\n class NetWork(Cell):\n def __init__(self):\n super(NetWork, self).__init__()\n self.tensor_ret = Tensor(np.array(9, np.int32))\n\n def construct(self, tensor):\n ret = tensor[6, 3, 4]\n return ret\n\n input_tensor = Tensor(np.ones([6, 8, 10], np.int32))\n net = NetWork()\n with pytest.raises(IndexError) as ex:\n net(input_tensor)\n assert \"'begin' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, \" \\\n \"but got 'shrink_axis_mask': 7, 'strides': 1, 'begin': 6.\" in str(ex.value)\n\n\[email protected]\[email protected]_arm_ascend_training\[email protected]_x86_ascend_training\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_tensor_range():\n a = np.arange(4*5*6).reshape(4, 5, 6).astype(np.float32)\n ta = Tensor(a, mstype.float32)\n ms_out = []\n for item in ta:\n ms_out.append(item)\n np_out = []\n for item in a:\n np_out.append(item)\n for i, elem in enumerate(ms_out):\n assert np.all(elem.asnumpy() == np_out[i])\n", "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nfrom mindspore import context, Tensor\nfrom mindspore.common import dtype as mstype\nfrom mindspore.nn import Cell\nfrom mindspore.ops.op_info_register import DataType\nfrom mindspore.ops.operations.custom_ops import Custom, CustomRegOp, custom_op_info_register\n\nouter_product_ascend_info = CustomRegOp() \\\n .fusion_type(\"OPAQUE\") \\\n .input(0, \"x1\") \\\n .input(1, \"x2\") \\\n .output(0, \"y\") \\\n .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \\\n .target(\"Ascend\") \\\n .get_op_info()\n\nouter_product_gpu_info = CustomRegOp() \\\n .fusion_type(\"OPAQUE\") \\\n .input(0, \"x1\") \\\n .input(1, \"x2\") \\\n 
.output(0, \"y\") \\\n .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \\\n .target(\"GPU\") \\\n .get_op_info()\n\n\n@custom_op_info_register(outer_product_ascend_info, outer_product_gpu_info)\ndef outer_product(a, b):\n c = output_tensor((a.shape[0], b.shape[1]), 'float32')\n\n for i0 in range(a.shape[0]):\n for i1 in range(b.shape[1]):\n c[i0, i1] = 0.0\n for i2 in range(a.shape[1]):\n c[i0, i1] = c[i0, i1] + (a[i0, i2] * b[i2, i1])\n return c\n\n\nclass TestHybrid(Cell):\n \"\"\"Net definition\"\"\"\n def __init__(self):\n super(TestHybrid, self).__init__()\n\n def infer_func(x, y):\n return x\n\n self.program = Custom(outer_product, out_shape=infer_func, out_dtype=infer_func, func_type=\"akg\")\n\n def construct(self, x, y):\n return self.program(x, y)\n\n\ndef test_hybrid():\n \"\"\"\n Feature: ALL To ALL\n Description: hybrid test cases.\n Expectation: the result match with numpy result\n \"\"\"\n input_x = np.random.normal(0, 1, [4, 4]).astype(np.float32)\n input_y = np.random.normal(0, 1, [4, 4]).astype(np.float32)\n\n test = TestHybrid()\n output = test(Tensor(input_x), Tensor(input_y))\n expect = np.matmul(input_x, input_y)\n compare_res = np.allclose(expect, output.asnumpy(), 0.001, 0.001)\n if not compare_res:\n raise ValueError(\"Precision error, compare result: {}\".format(compare_res))\n\n\ndef test_hybrid_ascend():\n \"\"\"\n Feature: ALL To ALL\n Description: hybrid ascend test cases.\n Expectation: the result match with numpy result\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n test_hybrid()\n\n\ndef test_hybrid_gpu():\n \"\"\"\n Feature: ALL To ALL\n Description: hybrid gpu test cases.\n Expectation: the result match with numpy result\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n test_hybrid()\n\n\nv_add_ascend_info = CustomRegOp() \\\n .fusion_type(\"OPAQUE\") \\\n .input(0, \"x\", \"dynamic\") \\\n .output(0, \"y\") \\\n .dtype_format(DataType.F16_Default, DataType.F16_Default) \\\n .target(\"Ascend\") \\\n .get_op_info()\n\nv_add_gpu_info = CustomRegOp() \\\n .fusion_type(\"OPAQUE\") \\\n .input(0, \"x\", \"dynamic\") \\\n .output(0, \"y\") \\\n .dtype_format(DataType.F16_Default, DataType.F16_Default) \\\n .target(\"GPU\") \\\n .get_op_info()\n\n\n@custom_op_info_register(v_add_ascend_info, v_add_gpu_info)\ndef v_add(inputs, attrs):\n def vadd_func(dst, data_1, data_2):\n ib = tvm.ir_builder.create()\n with ib.for_range_n(data_1.shape, \"i\") as i:\n ib.store(dst, i, ib.load(data_1, i) + ib.load(data_2, i))\n return ib.get()\n\n data_1, data_2 = inputs[0], inputs[1]\n return tvm.extern(data_1.shape, [data_1, data_2],\n lambda ins, outs: vadd_func(outs[0], ins[0], ins[1]),\n name=\"v_add\", dtype=data_1.dtype)\n\n\nclass TestIRbuilder(Cell):\n \"\"\"Net definition\"\"\"\n def __init__(self, shape):\n super(TestIRbuilder, self).__init__()\n self.program = Custom(v_add, out_shape=shape, out_dtype=mstype.float16, func_type=\"akg\")\n\n def construct(self, x, y):\n return self.program([x, y])\n\n\ndef test_irbuider():\n \"\"\"\n Feature: ALL To ALL\n Description: irbuider test cases.\n Expectation: the result match with numpy result\n \"\"\"\n shape = (4, 5)\n input_x = np.random.normal(0, 1, shape).astype(np.float16)\n input_y = np.random.normal(0, 1, shape).astype(np.float16)\n\n test = TestIRbuilder(shape)\n output = test(Tensor(input_x), Tensor(input_y))\n compare_res = np.allclose(input_x + input_y, output.asnumpy(), 0.001, 0.001)\n if not compare_res:\n raise 
ValueError(\"Precision error, compare result: {}\".format(compare_res))\n\n\ndef test_irbuider_ascend():\n \"\"\"\n Feature: ALL To ALL\n Description: irbuider ascend test cases.\n Expectation: the result match with numpy result\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\")\n test_irbuider()\n\n\ndef test_irbuider_gpu():\n \"\"\"\n Feature: ALL To ALL\n Description: irbuider gpu test cases.\n Expectation: the result match with numpy result\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n test_irbuider()\n" ]
[ [ "numpy.eye", "numpy.identity", "numpy.zeros" ], [ "numpy.fromfile", "scipy.io.loadmat", "numpy.frombuffer", "numpy.equal", "numpy.array" ], [ "numpy.arange", "numpy.ndarray", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.random.randint" ], [ "numpy.random.normal", "numpy.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XiaoqiMa/shapSD
[ "545f61c9e8329c7271051f22f99ba32508ba74a1" ]
[ "shapSD/pysubgroup/nominal_target.py" ]
[ "'''\nCreated on 29.09.2017\n\n@author: lemmerfn\n'''\nimport numpy as np\nimport scipy.stats\nfrom functools import total_ordering\n\nfrom .measures import AbstractInterestingnessMeasure, BoundedInterestingnessMeasure\nfrom .utils import effective_sample_size, powerset\nfrom .subgroup import SubgroupDescription, Subgroup, NominalSelector\n\n\n@total_ordering\nclass NominalTarget(object):\n\n def __init__(self, target_attribute=None, target_value=None, target_selector=None):\n \"\"\"\n Creates a new target for the boolean model class (classic subgroup discovery). \n If target_attribute and target_value are given, the target_selector is computed using attribute and value\n \"\"\"\n if target_attribute is not None and target_value is not None:\n if target_selector is not None:\n raise BaseException(\n \"NominalTarget is to be constructed EITHER by a selector OR by attribute/value pair\")\n target_selector = NominalSelector(target_attribute, target_value)\n if target_selector is None:\n raise BaseException(\"No target selector given\")\n self.target_selector = target_selector\n\n def __repr__(self):\n return \"T: \" + str(self.target_selector)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __lt__(self, other):\n return str(self) < str(other)\n\n def covers(self, instance):\n return self.target_selector.covers(instance)\n\n def get_attributes(self):\n return [self.target_selector.get_attribute_name()]\n\n @staticmethod\n def get_base_statistics(data, subgroup, weighting_attribute=None):\n\n if weighting_attribute is None:\n sg_instances = subgroup.subgroup_description.covers(data)\n positives = subgroup.target.covers(data)\n instances_subgroup = np.sum(sg_instances)\n positives_dataset = np.sum(positives)\n instances_dataset = len(data)\n positives_subgroup = np.sum(np.logical_and(sg_instances, positives))\n return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup\n else:\n weights = data[weighting_attribute]\n sg_instances = subgroup.subgroup_description.covers(data)\n positives = subgroup.target.covers(data)\n\n instances_dataset = np.sum(weights)\n instances_subgroup = np.sum(np.dot(sg_instances, weights))\n positives_dataset = np.sum(np.dot(positives, weights))\n positives_subgroup = np.sum(np.dot(np.logical_and(sg_instances, positives), weights))\n return instances_dataset, positives_dataset, instances_subgroup, positives_subgroup\n\n @staticmethod\n def calculate_statistics(subgroup, data, weighting_attribute=None):\n (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \\\n NominalTarget.get_base_statistics(data, subgroup, weighting_attribute)\n subgroup.statistics['size_sg'] = instances_subgroup\n subgroup.statistics['size_dataset'] = instances_dataset\n subgroup.statistics['positives_sg'] = positives_subgroup\n subgroup.statistics['positives_dataset'] = positives_dataset\n\n subgroup.statistics['size_complement'] = instances_dataset - instances_subgroup\n subgroup.statistics['relative_size_sg'] = instances_subgroup / instances_dataset\n subgroup.statistics['relative_size_complement'] = (instances_dataset - instances_subgroup) / instances_dataset\n subgroup.statistics['coverage_sg'] = positives_subgroup / positives_dataset\n subgroup.statistics['coverage_complement'] = (positives_dataset - positives_subgroup) / positives_dataset\n subgroup.statistics['target_share_sg'] = positives_subgroup / instances_subgroup\n subgroup.statistics['target_share_complement'] = (positives_dataset - 
positives_subgroup) / (\n instances_dataset - instances_subgroup)\n subgroup.statistics['target_share_dataset'] = positives_dataset / instances_dataset\n subgroup.statistics['lift'] = (positives_subgroup / instances_subgroup) / (\n positives_dataset / instances_dataset)\n\n if weighting_attribute is not None:\n (instances_dataset, positives_dataset, instances_subgroup, positives_subgroup) = \\\n NominalTarget.get_base_statistics(subgroup, data, weighting_attribute)\n subgroup.statistics['size_sg_weighted'] = instances_subgroup\n subgroup.statistics['size_dataset_weighted'] = instances_dataset\n subgroup.statistics['positives_sg_weighted'] = positives_subgroup\n subgroup.statistics['positives_dataset_weighted'] = positives_dataset\n\n subgroup.statistics['size_complement_weighted'] = instances_dataset - instances_subgroup\n subgroup.statistics['relative_size_sg_weighted'] = instances_subgroup / instances_dataset\n subgroup.statistics['relative_size_complement_weighted'] = \\\n (instances_dataset - instances_subgroup) / instances_dataset\n subgroup.statistics['coverage_sg_weighted'] = positives_subgroup / positives_dataset\n subgroup.statistics['coverage_complement_weighted'] = (\n positives_dataset - positives_subgroup) / positives_dataset\n subgroup.statistics['target_share_sg_weighted'] = positives_subgroup / instances_subgroup\n subgroup.statistics['target_share_complement_weighted'] = (positives_dataset - positives_subgroup) / (\n instances_dataset - instances_subgroup)\n subgroup.statistics['target_share_dataset_weighted'] = positives_dataset / instances_dataset\n subgroup.statistics['lift_weighted'] = (positives_subgroup / instances_subgroup) / (\n positives_dataset / instances_dataset)\n\n\nclass ChiSquaredQF(AbstractInterestingnessMeasure):\n @staticmethod\n def chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup, min_instances=5,\n bidirect=True, direction_positive=True):\n if (instances_subgroup < min_instances) or ((instances_dataset - instances_subgroup) < min_instances):\n return float(\"-inf\")\n p_subgroup = positives_subgroup / instances_subgroup\n p_dataset = positives_dataset / instances_dataset\n positives_complement = positives_dataset - positives_subgroup\n\n # instancesComplement = instancesDataset - instancesSubgroup\n negatives_subgroup = instances_subgroup - positives_subgroup\n negatives_dataset = instances_dataset - positives_dataset\n negatives_complement = negatives_dataset - negatives_subgroup\n\n # observed = [positivesSubgroup, positives_complement,negatives_subgroup, negatives_complement]\n #\n # if round(positivesSubgroup) < 0 or\n # round(positives_complement) < 0 or\n # round(negatives_subgroup) <0 or\n # round (negatives_complement) < 0:\n # print (\"XXXXX\")\n val = scipy.stats.chi2_contingency([[round(positives_subgroup), round(positives_complement)],\n [round(negatives_subgroup), round(negatives_complement)]],\n correction=False)[0]\n if bidirect:\n return val\n elif direction_positive and p_subgroup > p_dataset:\n return val\n elif not direction_positive and p_subgroup < p_dataset:\n return val\n return -val\n\n @staticmethod\n def chi_squared_qf_weighted(subgroup, data, weighting_attribute, effective_sample_size=0, min_instances=5, ):\n (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(data,\n weighting_attribute)\n if (instancesSubgroup < min_instances) or ((instancesDataset - instancesSubgroup) < 5):\n return float(\"inf\")\n if effective_sample_size == 
0:\n effective_sample_size = effective_sample_size(data[weighting_attribute])\n # p_subgroup = positivesSubgroup / instancesSubgroup\n # p_dataset = positivesDataset / instancesDataset\n\n negatives_subgroup = instancesSubgroup - positivesSubgroup\n negatives_dataset = instancesDataset - positivesDataset\n positives_complement = positivesDataset - positivesSubgroup\n negatives_complement = negatives_dataset - negatives_subgroup\n val = scipy.stats.chi2_contingency([[positivesSubgroup, positives_complement],\n [negatives_subgroup, negatives_complement]], correction=True)[0]\n return scipy.stats.chi2.sf(val * effective_sample_size / instancesDataset, 1)\n\n def __init__(self, direction='bidirect', min_instances=5):\n if direction == 'bidirect':\n self.bidirect = True\n self.direction_positive = True\n if direction == 'positive':\n self.bidirect = False\n self.direction_positive = True\n if direction == 'negative':\n self.bidirect = False\n self.direction_positive = False\n self.min_instances = min_instances\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n if weighting_attribute is None:\n result = self.evaluate_from_statistics(*subgroup.get_base_statistics(data))\n else:\n (instancesDataset, positivesDataset, instancesSubgroup, positivesSubgroup) = subgroup.get_base_statistics(\n data, weighting_attribute)\n weights = data[weighting_attribute]\n base = self.evaluate_from_statistics(instancesDataset, positivesDataset, instancesSubgroup,\n positivesSubgroup)\n result = base * effective_sample_size(weights) / instancesDataset\n return result\n\n def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n return ChiSquaredQF.chi_squared_qf(instances_dataset, positives_dataset, instances_subgroup, positives_subgroup,\n self.min_instances, self.bidirect, self.direction_positive)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\nclass StandardQF(AbstractInterestingnessMeasure, BoundedInterestingnessMeasure):\n @staticmethod\n def standard_qf(a, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n if instances_subgroup == 0:\n return 0\n p_subgroup = positives_subgroup / instances_subgroup\n p_dataset = positives_dataset / instances_dataset\n return (instances_subgroup / instances_dataset) ** a * (p_subgroup - p_dataset)\n\n def __init__(self, a):\n self.a = a\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n return self.evaluate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute))\n\n def optimistic_estimate_from_dataset(self, data, subgroup, weighting_attribute=None):\n if not self.is_applicable(subgroup):\n raise BaseException(\"Quality measure cannot be used for this target class\")\n return self.optimistic_estimate_from_statistics(*subgroup.get_base_statistics(data, weighting_attribute))\n\n def evaluate_from_statistics(self, instances_dataset, positives_dataset, instances_subgroup, positives_subgroup):\n return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, instances_subgroup,\n positives_subgroup)\n\n def optimistic_estimate_from_statistics(self, instances_dataset, 
positives_dataset, instances_subgroup,\n positives_subgroup):\n return StandardQF.standard_qf(self.a, instances_dataset, positives_dataset, positives_subgroup,\n positives_subgroup)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\nclass WRAccQF(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 1.0\n\n\nclass LiftQF(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 0.0\n\n\nclass SimpleBinomial(StandardQF):\n def __init__(self, a):\n super().__init__(a)\n self.a = 0.5\n\n\n#####\n# GeneralizationAware Interestingness Measures\n#####\nclass GAStandardQF(AbstractInterestingnessMeasure):\n def __init__(self, a):\n self.a = a\n\n def evaluate_from_dataset(self, data, subgroup, weighting_attribute=None):\n (instances_dataset, _, instances_subgroup, positives_subgroup) = subgroup.get_base_statistics(data,\n weighting_attribute)\n if (instances_subgroup == 0) or (instances_dataset == instances_subgroup):\n return 0\n p_subgroup = positives_subgroup / instances_subgroup\n max_target_share = get_max_generalization_target_share(data, subgroup, weighting_attribute)\n relative_size = (instances_subgroup / instances_dataset)\n return relative_size ** self.a * (p_subgroup - max_target_share)\n\n def supports_weights(self):\n return True\n\n def is_applicable(self, subgroup):\n return isinstance(subgroup.target, NominalTarget)\n\n\ndef get_max_generalization_target_share(data, subgroup, weighting_attribute=None):\n selectors = subgroup.subgroup_description.selectors\n generalizations = powerset(selectors)\n max_target_share = 0\n for sels in generalizations:\n sgd = SubgroupDescription(list(sels))\n sg = Subgroup(subgroup.target, sgd)\n (_, _, instances_subgroup, positives_subgroup) = sg.get_base_statistics(data, weighting_attribute)\n target_share = positives_subgroup / instances_subgroup\n max_target_share = max(max_target_share, target_share)\n return max_target_share\n" ]
[ [ "numpy.dot", "numpy.logical_and", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
designer357/MSLSTM
[ "923f29f5a274ae41dbfe79d99e1ea28bb0cf5109" ]
[ "train.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\nmincheng:[email protected]\n\"\"\"\nfrom __future__ import division\nimport sys\nimport printlog\nimport datetime\nimport os\nimport time\nimport sklearn\nfrom sklearn.metrics import confusion_matrix\nfrom baselines import sclearn\nimport evaluation\nfrom collections import defaultdict\nimport tensorflow as tf\nimport mslstm\nimport config\nimport loaddata\nimport numpy as np\nimport visualize\nfrom sklearn.metrics import accuracy_score\nfrom baselines import nnkeras,sclearn\nimport matplotlib.pyplot as plt\nflags = tf.app.flags\n\nFLAGS = flags.FLAGS\n\n\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert inputs.shape[0] == targets.shape[0]\n\n if shuffle:\n indices = np.arange(inputs.shape[0])\n np.random.shuffle(indices)\n\n for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt]\ndef pprint(msg,method=''):\n #if not 'Warning' in msg:\n if 1<0:\n sys.stdout = printlog.PyLogger('',method+'_'+str(FLAGS.num_neurons1))\n print(msg)\n try:\n sys.stderr.write(msg+'\\n')\n except:\n pass\n #sys.stdout.flush()\n else:\n print(msg)\n#def sess_run(commander,data,label):\n #global sess, data_x, data_y\n #return sess.run(commander, {data_x: data, data_y: label})\n\ndef train_lstm(method,filename_train_list,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list):\n global tempstdout\n FLAGS.option = method\n dropout = 0.8\n x_train, y_train, x_val, y_val, x_test, y_test = loaddata.get_data(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,\n multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n waveType=FLAGS.wave_type)\n \"\"\"\n if filename_test == 'HB_AS_Leak.txt':\n filename_train = 'HB_C_N_S.txt'\n elif filename_test == 'HB_Code_Red_I.txt':\n filename_train = 'HB_A_N_S.txt'\n elif filename_test == 'HB_Nimda.txt':\n filename_train = 'HB_A_C_S.txt'\n elif filename_test == 'HB_Slammer.txt':\n filename_train = 'HB_A_C_N.txt'\n print(filename_test)\n #x_train, y_train, x_val, y_val = loaddata.get_trainData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n # filename_train, FLAGS.sequence_window, trigger_flag,is_binary_class,\n # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n # waveType=FLAGS.wave_type)\n #x_test, y_test = loaddata.get_testData(FLAGS.pooling_type, FLAGS.is_add_noise, FLAGS.noise_ratio, FLAGS.data_dir,\n # filename_test, FLAGS.sequence_window, trigger_flag,is_binary_class,\n # multiScale=FLAGS.is_multi_scale, waveScale=FLAGS.scale_levels,\n # waveType=FLAGS.wave_type)\n\n \"\"\"\n #loaddata.Multi_Scale_Plotting_2(x_train)\n\n if FLAGS.is_multi_scale:\n FLAGS.scale_levels = x_train.shape[1]\n FLAGS.input_dim = x_train.shape[-1]\n FLAGS.number_class = y_train.shape[1]\n if \"Nimda\" in filename_test:\n FLAGS.batch_size = int(int(x_train.shape[0])/5)\n else:\n FLAGS.batch_size = int(x_train.shape[0])\n else:\n FLAGS.input_dim = x_train.shape[-1]\n FLAGS.number_class = y_train.shape[1]\n if \"Nimda\" in filename_test:\n FLAGS.batch_size = int(int(x_train.shape[0])/5)\n else:\n FLAGS.batch_size = int(x_train.shape[0])\n #g = tf.Graph()\n with tf.Graph().as_default():\n #config = tf.ConfigProto()\n config = tf.ConfigProto(device_count={'/gpu': 0}) #turn GPU on and off\n 
#config = tf.ConfigProto(log_device_placement=True)\n #config.gpu_options.per_process_gpu_memory_fraction = 0.2\n #with tf.variable_scope(\"middle\")as scope:\n tf.set_random_seed(1337)\n #global_step = tf.Variable(0,name=\"global_step\",trainable=False)\n data_x,data_y = mslstm.inputs(FLAGS.option)\n #output_u_w,prediction, label = mslstm.inference(data_x,data_y,FLAGS.option)\n\n is_training = tf.placeholder(tf.bool)\n prediction, label,output_last = mslstm.inference(data_x,data_y,FLAGS.option,is_training)\n loss = mslstm.loss_(prediction, label)\n tran_op,optimizer = mslstm.train(loss)\n minimize = optimizer.minimize(loss)\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n #summary_op = tf.merge_all_summaries()\n weights = tf.Variable(tf.constant(0.1, shape=[len(y_test)*FLAGS.sequence_window, 1, FLAGS.scale_levels]),\n name=\"weights123\")\n init_op = tf.global_variables_initializer()\n #init_op = tf.initialize_all_variables()\n sess = tf.Session(config=config)\n sess.run(init_op)\n\n #summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)\n #saver = tf.train.Saver()\n saver = tf.train.Saver({\"my_weights\": weights})\n\n epoch_training_loss_list = []\n epoch_training_acc_list = []\n epoch_val_loss_list = []\n epoch_val_acc_list = []\n early_stopping = 10\n no_of_batches = int(len(x_train) / FLAGS.batch_size)\n #visualize.curve_plotting_withWindow(x_train, y_train, 0, \"Train_\"+'_'+FLAGS.option)\n #visualize.curve_plotting_withWindow(x_test, y_test, 2, \"Test_\"+'_'+FLAGS.option)\n total_iteration = 0\n for i in range(FLAGS.max_epochs):\n if early_stopping > 0:\n pass\n else:\n break\n j_iteration = 0\n for j_batch in iterate_minibatches(x_train,y_train,FLAGS.batch_size,shuffle=False):\n j_iteration += 1\n total_iteration += 1\n inp, out = j_batch\n sess.run(minimize, {data_x: inp, data_y: out, is_training:True})\n training_acc, training_loss = sess.run((accuracy, loss), {data_x: inp, data_y: out,is_training:True})\n #sys.stdout = tempstdout\n val_acc, val_loss = sess.run((accuracy, loss), {data_x:x_val, data_y:y_val,is_training:True})\n pprint(\n FLAGS.option + \"_Epoch%s\" % (str(i + 1)) + \">\" * 3 +'_Titer-'+str(total_iteration) +'_iter-'+str(j_iteration)+ str(FLAGS.wave_type) + '-' + str(FLAGS.scale_levels) + '-' + str(FLAGS.learning_rate)+'-'+str(FLAGS.num_neurons1)+'-'+str(FLAGS.num_neurons2)+ \">>>=\" + \"train_accuracy: %s, train_loss: %s\" % (\n str(training_acc), str(training_loss)) \\\n + \",\\tval_accuracy: %s, val_loss: %s\" % (str(val_acc), str(val_loss)), method)\n\n\n epoch_training_loss_list.append(training_loss)\n epoch_training_acc_list.append(training_acc)\n epoch_val_loss_list.append(val_loss)\n epoch_val_acc_list.append(val_acc)\n\n try:\n max_val_acc = epoch_val_acc_list[-2]\n except:\n max_val_acc = 0\n\n if epoch_val_acc_list[-1] < max_val_acc:\n early_stopping -= 1\n elif epoch_val_acc_list[-1] >= max_val_acc:\n early_stopping = 10\n if val_loss > 10 or val_loss == np.nan:\n break\n if 1<0:\n #pprint(\"PPP\")\n weights_results = sess.run(output_last, {data_x:x_test, data_y: y_test})\n #print(weights_results)\n #sys.stdout = tempstdout\n visualize.curve_plotting(weights_results,y_test,filename_test,FLAGS.option)\n #pprint(\"QQQ\")\n with open(filename_test+\"_EA.txt\",'w')as fout:\n fout.write(weights_results)\n #sess.run(weights.assign(weights_results))\n else:\n pass\n\n #weights = output_u_w.eval(session=sess)\n #weights = saver.restore(sess, 
\"./tf_tmp/model.ckpt\")\n #pprint(weights)\n #weight_list = return_max_index(weights)\n result = sess.run(prediction, {data_x:x_test, data_y: y_test})\n #print(result)\n #pprint(result)\n #print(\"LLL\")\n saver.save(sess, \"./tf_tmp/model.ckpt\")\n sess.close()\n #results = evaluation.evaluation(y_test, result)#Computing ACCURACY, F1-Score, .., etc\n if is_binary_class == True:\n #sys.stdout = tempstdout\n results = evaluation.evaluation(y_test, result, trigger_flag, evalua_flag) # Computing ACCURACY,F1-score,..,etc\n y_test = loaddata.reverse_one_hot(y_test)\n result = loaddata.reverse_one_hot(result)\n else:\n symbol_list = [0, 1, 2, 3, 4]\n sys.stdout = tempstdout\n print(y_test)\n print(result)\n y_test = loaddata.reverse_one_hot(y_test)\n result = loaddata.reverse_one_hot(result)\n\n confmat = confusion_matrix(y_test, result, labels=symbol_list)\n visualize.plotConfusionMatrix(confmat)\n #accuracy = sklearn.metrics.accuracy_score(y_test, result)\n symbol_list2 = [0]\n y_ = []\n for symbol in symbol_list2:\n for tab in range(len(y_test)):\n if y_test[tab] == symbol and y_test[tab] == result[tab]:\n y_.append(symbol)\n # print(y_test[0:10])\n # rint(result[0:10])\n # print(\"Accuracy is :\"+str(accuracy))\n accuracy = float(len(y_)) / (list(result).count(symbol))\n print(\"Accuracy of \" + str(symbol) + \" is :\" + str(accuracy))\n print(\"True is \")\n # print(y_test)\n print(\"The 0 of True is \" + str(list(y_test).count(0)))\n print(\"The 1 of True is \" + str(list(y_test).count(1)))\n print(\"The 2 of True is \" + str(list(y_test).count(2)))\n print(\"The 3 of True is \" + str(list(y_test).count(3)))\n print(\"The 4 of True is \" + str(list(y_test).count(4)))\n # print(len(y_test))\n print(\"Predict is \")\n # print(result)\n print(\"The 0 of Predict is \" + str(list(result).count(0)))\n print(\"The 1 of Predict is \" + str(list(result).count(1)))\n print(\"The 2 of Predict is \" + str(list(result).count(2)))\n print(\"The 3 of Predict is \" + str(list(result).count(3)))\n print(\"The 4 of Predict is \" + str(list(result).count(4)))\n print(\"Accuracy is :\" + str(accuracy))\n f1_score = sklearn.metrics.f1_score(y_test, result,average=\"macro\")\n print(\"F-score is :\" + str(f1_score))\n results = {'ACCURACY': accuracy, 'F1_SCORE': f1_score, 'AUC': 9999, 'G_MEAN': 9999}\n sys.stdout = tempstdout\n #print(weights_results.shape)\n #print(\"215\")\n y_test2 = np.array(y_test)\n result2 = np.array(result)\n #results = accuracy_score(y_test2, result2)\n #print(y_test2)\n #print(result2)\n #print(results)\n with open(os.path.join(os.path.join(os.getcwd(),'stat'),\"StatFalseAlarm_\" + filename_test + \"_True.txt\"), \"w\") as fout:\n for tab in range(len(y_test2)):\n fout.write(str(int(y_test2[tab])) + '\\n')\n with open(os.path.join(os.path.join(os.getcwd(),'stat'),\"StatFalseAlarm_\" + filename_test + \"_\" + method + \"_\" + \"_Predict.txt\"), \"w\") as fout:\n for tab in range(len(result2)):\n fout.write(str(int(result2[tab])) + '\\n')\n #eval_list = [\"AUC\", \"G_MEAN\",\"ACCURACY\",\"F1_SCORE\"]\n for each_eval in evaluation_list:\n result_list_dict[each_eval].append(results[each_eval])\n\n if evalua_flag:\n with open(os.path.join(FLAGS.output, \"TensorFlow_Log\" + filename_test + \".txt\"), \"a\")as fout:\n if not FLAGS.is_multi_scale:\n outfileline = FLAGS.option + \"_epoch:\" + str(FLAGS.max_epochs) + \",_lr:\" + str(FLAGS.learning_rate) + \",_multi_scale:\" + str(FLAGS.is_multi_scale) + \",hidden_nodes: \"+str(FLAGS.num_neurons1)+\"/\"+str(FLAGS.num_neurons2) + \"\\n\"\n 
else:\n outfileline = FLAGS.option + \"_epoch:\" + str(FLAGS.max_epochs) + \",_wavelet:\"+str(FLAGS.wave_type) + \",_lr:\" + str(FLAGS.learning_rate) + \",_multi_scale:\" + str(FLAGS.is_multi_scale) + \",_train_set_using_level:\" + str(FLAGS.scale_levels) + \",hidden_nodes: \"+str(FLAGS.num_neurons1)+\"/\"+str(FLAGS.num_neurons2) + \"\\n\"\n\n fout.write(outfileline)\n for each_eval in evaluation_list:\n #for eachk, eachv in result_list_dict.items():\n fout.write(each_eval + \": \" + str(round(np.mean(result_list_dict[each_eval]), 3)) + \",\\t\")\n fout.write('\\n')\n return epoch_training_acc_list,epoch_val_acc_list,epoch_training_loss_list,epoch_val_loss_list\n else:\n return results\n\n\n\n\ndef train_classic(method,filename_train,filename_test, trigger_flag,evalua_flag,is_binary_class,evaluation_list):\n return sclearn.Basemodel(method,filename_train,filename_test,trigger_flag,evalua_flag,evaluation_list)\n\ndef train(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type='db1'):\n global data_x, data_y\n result_list_dict = defaultdict(list)\n #evaluation_list = [\"ACCURACY\", \"F1_SCORE\", \"AUC\", \"G_MEAN\"]\n for each in evaluation_list:\n result_list_dict[each] = []\n if 'L' in method or 'RNN' in method:\n sys.stdout = tempstdout\n if method == '1L' or method == '2L' or method == '3L' \\\n or method == '4L' or method == '5L' or method == 'RNN':\n #FLAGS.learning_rate = 0.01\n FLAGS.is_multi_scale = False\n elif 'AL' == method:\n #FLAGS.learning_rate = 0.01\n FLAGS.is_multi_scale = False\n else:\n #FLAGS.learning_rate = 0.05\n FLAGS.is_multi_scale = True\n FLAGS.wave_type = wave_type\n return train_lstm(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list)\n else:\n sys.stdout = tempstdout\n return train_classic(method,filename_train,filename_test,trigger_flag,evalua_flag,is_binary_class,result_list_dict,evaluation_list)\n\ndef main(unused_argv):\n global tempstdout\n\n #main function\n\n\n #wave_type_list =['db1','db2','haar','coif1','db1','db2','haar','coif1','db1','db2']\n wave_type_list = ['haar']\n multi_scale_value_list = [2,3,4,5,6,10]\n case_label = {'SVM':'SVM','NB':'NB','DT':'DT','Ada.Boost':'Ada.Boost','RF':'RF','1NN':'1NN','1NN-DTW':'DTW',\n 'SVMF':'SVMF','SVMW':'SVMW','MLP':'MLP','RNN':'RNN','1L':'LSTM','2L':'2-LSTM','3L':'3-LSTM',\\\n 'AL':'ALSTM','HL':'MSLSTM','HAL':'MSLSTM'}\n\n trigger_flag = 1\n evalua_flag = True\n is_binary_class = True\n single_layer = True\n\n if is_binary_class:\n filename_list = [\"HB_AS_Leak.txt\",\"HB_Code_Red_I.txt\",\"HB_Nimda.txt\",\"HB_Slammer.txt\"]\n #filename_list = [\"HB_Slammer.txt\"] # HB_Code_Red_I.txt\n # HB_Nimda.txt\n # HB_Slammer.txt\n else:\n filename_list = [\"HB_ALL.txt\"]\n\n if trigger_flag == 1 :\n if single_layer:\n #case = ['AL']\n #case = ['1L','3L','AL']\n case = ['MLP','RNN','1L','2L','3L','AL']\n else:\n case = ['HL','HAL']\n #case = ['HL','HAL']\n\n else:\n case = [\"1NN\"]\n #case = [\"RF\",\"SVM\",\"SVMF\",\"SVMW\",\"NB\",\"DT\",\"Ada.Boost\",\"1NN\"]\n #case = [\"NB\",\"1NN\",\"Ada.Boost\",\"RF\"]\n\n if evalua_flag:\n evaluation_list = [\"AUC\", \"G_MEAN\", \"ACCURACY\", \"F1_SCORE\"]\n else:\n evaluation_list = [\"FPR\", \"TPR\",\"AUC\",\"G_MEAN\"]\n\n wave_type = wave_type_list[0]\n hidden_unit1_list = [8,16,32,64,128,256]\n #hidden_unit1_list = [16]\n\n hidden_unit2_list = [8,16,32,64,128]\n #hidden_unit2_list = [8]\n\n\n #combination_list = [(16,8),(16,32),(16,64),(32,64),(128,16)]\n #combination_list 
= [(8,8),(8,32),(16,8),(16,64),(128,16),(128,64)]\n #learning_rate_list = [0.001, 0.01, 0.05, 0.1]\n learning_rate_list = [0.1,0.05,0.01,0.001]\n\n for tab in range(len(filename_list)):\n case_list = []\n train_acc_list = []\n val_acc_list = []\n train_loss_list = []\n val_loss_list = []\n if single_layer:\n combination_list = hidden_unit1_list\n else:\n combination_list = []\n for each1 in hidden_unit1_list:\n for each2 in hidden_unit2_list:\n combination_list.append((each1, each2))\n \"\"\"\n if filename_list[tab] == \"HB_AS_Leak.txt\":\n combination_list = [(32, 64), (32, 128), (64, 64)]\n elif filename_list[tab] == \"HB_Code_Red_I.txt\":\n combination_list = [(32, 32), (16, 8), (16, 64), (32, 64)]\n elif filename_list[tab] == \"HB_Nimda.txt\":\n combination_list = [(8, 32), (32, 64)]\n elif filename_list[tab] == \"HB_Slammer.txt\":\n combination_list = [(16, 8), (16, 32), (16, 64)]\n \"\"\"\n\n results = {}\n for each_case in case:\n if 1>0:\n case_list.append(case_label[each_case])\n if trigger_flag: #\n sys.stdout = tempstdout\n if each_case == 'MLP':\n if evalua_flag:\n nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n else:\n results[case_label[each_case]] = nnkeras.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n\n else:\n if evalua_flag:\n for learning_rate in learning_rate_list:\n FLAGS.learning_rate = learning_rate\n\n for each_comb in combination_list:\n if not 'H' in each_case:\n FLAGS.num_neurons1 = each_comb\n #FLAGS.num_neurons1 = 16\n #FLAGS.learning_rate = 0.001\n else:\n #if each_case == 'HAL':\n #FLAGS.num_neurons1, FLAGS.num_neurons2 = (100,64)\n #elif each_case == 'HL':\n #FLAGS.num_neurons1, FLAGS.num_neurons2 = (16,8)\n FLAGS.num_neurons1, FLAGS.num_neurons2 = each_comb\n\n train_acc,val_acc,train_loss,val_loss = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type)\n train_acc_list.append(train_acc)\n val_acc_list.append(val_acc)\n train_loss_list.append(train_loss)\n val_loss_list.append(val_loss)\n #visualize.epoch_acc_plotting(filename_list[tab],case_list,FLAGS.sequence_window,FLAGS.learning_rate,train_acc_list,val_acc_list)\n #visualize.epoch_loss_plotting(filename_list[tab], case_list,FLAGS.sequence_window, FLAGS.learning_rate,train_loss_list, val_loss_list)\n else:\n results[case_label[each_case]] = train(each_case,filename_list, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list,wave_type)\n\n else:\n sys.stdout = tempstdout\n if evalua_flag:\n sclearn.Basemodel(each_case, filename_list[tab], trigger_flag, evalua_flag,is_binary_class,evaluation_list)\n else:\n results[case_label[each_case]] = sclearn.Basemodel(each_case, filename_list[tab],trigger_flag,evalua_flag,is_binary_class,evaluation_list)\n else:\n pass\n if not evalua_flag:\n visualize.plotAUC(results,case_list,filename_list[tab])\n else:\n if trigger_flag:\n try:\n print()\n #visualize.epoch_acc_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_acc_list, val_acc_list)\n #visualize.epoch_loss_plotting(filename_list[tab], case_list, FLAGS.sequence_window,FLAGS.learning_rate, train_loss_list, val_loss_list)\n except:\n pass\n end = time.time()\n pprint(\"The time elapsed : \" + str(end - start) + ' seconds.\\n')\n\n\nif __name__ == \"__main__\":\n global tempstdout\n tempstdout = sys.stdout\n 
pprint(\"------------------------------------------------\"+str(datetime.datetime.now())+\"--------------------------------------------\")\n start = time.time()\n tf.app.run()\n" ]
[ [ "tensorflow.Graph", "numpy.arange", "tensorflow.cast", "tensorflow.argmax", "tensorflow.placeholder", "numpy.random.shuffle", "sklearn.metrics.confusion_matrix", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.mean", "tensorflow.Session", "sklearn.metrics.f1_score", "tensorflow.set_random_seed", "tensorflow.train.Saver", "numpy.array", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
erinfolami/ZazuML
[ "c5247859353cacf0e4a58f9c530a07038d9e12cf" ]
[ "ObjectDetNet/retinanet/dataloaders/custom_transforms.py" ]
[ "import math\nimport torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nfrom numpy import int64 as int64\nimport torchvision.transforms as transforms\n\nfrom PIL import Image, ImageOps, ImageFilter\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Args:\n mean (tuple): means for each channel.\n std (tuple): standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n img = np.array(img).astype(np.float32)\n mask = np.array(mask).astype(np.float32)\n img /= 255.0\n img -= self.mean\n img /= self.std\n\n return {'image': img,\n 'label': mask}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n img = sample['image']\n mask = sample['label']\n img = np.array(img).astype(np.float32).transpose((2, 0, 1))\n mask = np.array(mask).astype(np.float32)\n\n img = torch.from_numpy(img).float()\n mask = torch.from_numpy(mask).float()\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomHorizontalFlip(object):\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomRotate(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n rotate_degree = random.uniform(-1 * self.degree, self.degree)\n img = img.rotate(rotate_degree, Image.BILINEAR)\n mask = mask.rotate(rotate_degree, Image.NEAREST)\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomGaussianBlur(object):\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n if random.random() < 0.5:\n img = img.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n\n return {'image': img,\n 'label': mask}\n\n\nclass RandomScaleCrop(object):\n def __init__(self, base_size, crop_size, fill=0):\n self.base_size = base_size\n self.crop_size = crop_size\n self.fill = fill\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n # random scale (short edge)\n short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))\n w, h = img.size\n if h > w:\n ow = short_size\n oh = int(1.0 * h * ow / w)\n else:\n oh = short_size\n ow = int(1.0 * w * oh / h)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # pad crop\n if short_size < self.crop_size:\n padh = self.crop_size - oh if oh < self.crop_size else 0\n padw = self.crop_size - ow if ow < self.crop_size else 0\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)\n # random crop crop_size\n w, h = img.size\n x1 = random.randint(0, w - self.crop_size)\n y1 = random.randint(0, h - self.crop_size)\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n\n return {'image': img,\n 'label': mask}\n\n\nclass FixScaleCrop(object):\n def __init__(self, crop_size):\n self.crop_size = crop_size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, 
h = img.size\n if w > h:\n oh = self.crop_size\n ow = int(1.0 * w * oh / h)\n else:\n ow = self.crop_size\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # center crop\n w, h = img.size\n x1 = int(round((w - self.crop_size) / 2.))\n y1 = int(round((h - self.crop_size) / 2.))\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n\n return {'image': img,\n 'label': mask}\n\n\n# resize to 512*1024\nclass FixedResize(object):\n \"\"\"change the short edge length to size\"\"\"\n\n def __init__(self, resize=512):\n self.size1 = resize # size= 512\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n assert img.size == mask.size\n\n w, h = img.size\n if w > h:\n oh = self.size1\n ow = int(1.0 * w * oh / h)\n else:\n ow = self.size1\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n return {'image': img,\n 'label': mask}\n\n\n# random crop 321*321\nclass RandomCrop(object):\n def __init__(self, crop_size=320):\n self.crop_size = crop_size\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, h = img.size\n x1 = random.randint(0, w - self.crop_size)\n y1 = random.randint(0, h - self.crop_size)\n img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))\n return {'image': img,\n 'label': mask}\n\n\nclass RandomScale(object):\n def __init__(self, scales=(1,)):\n self.scales = scales\n\n def __call__(self, sample):\n img = sample['image']\n mask = sample['label']\n w, h = img.size\n scale = random.choice(self.scales)\n w, h = int(w * scale), int(h * scale)\n return {'image': img,\n 'label': mask}\n\n\nclass TransformTr(object):\n def __init__(self, resize, multi_scale=None):\n if multi_scale is None:\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=resize),\n # RandomCrop(crop_size=args.crop_size),\n # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),\n # tr.RandomGaussianBlur(),\n # Normalize(mean, std),\n # ToTensor()\n ])\n else:\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=args.resize),\n RandomScale(scales=args.multi_scale),\n RandomCrop(crop_size=args.crop_size),\n # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),\n # tr.RandomGaussianBlur(),\n Normalize(mean, std),\n ToTensor()])\n\n def __call__(self, sample):\n return self.composed_transforms(sample)\n\n\nclass TransformVal(object):\n def __init__(self, args, mean, std):\n self.composed_transforms = transforms.Compose([\n FixedResize(resize=args.resize),\n FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS\n Normalize(mean, std),\n ToTensor()])\n\n def __call__(self, sample):\n return self.composed_transforms(sample)\n" ]
[ [ "numpy.array", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DwaraknathT/sparsify
[ "bbe3b6e492c2bc8fdd9dd37d87ffc5f51f520792" ]
[ "models/resnet.py" ]
[ "'''\nProperly implemented ResNet-s for CIFAR10 as described in paper [1].\n\nThe implementation and structure of this file is hugely influenced by [2]\nwhich is implemented for ImageNet and doesn't have option A for identity.\nMoreover, most of the implementations on the web is copy-paste from\ntorchvision's resnet and has wrong number of params.\n\nProper ResNet-s for CIFAR10 (for fair comparision and etc.) has following\nnumber of layers and parameters:\n\nname | layers | params\nResNet20 | 20 | 0.27M\nResNet32 | 32 | 0.46M\nResNet44 | 44 | 0.66M\nResNet56 | 56 | 0.85M\nResNet110 | 110 | 1.7M\nResNet1202| 1202 | 19.4m\n\nwhich this implementation indeed has.\n\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nIf you use this implementation in you work, please don't forget to mention the\nauthor, Yerlan Idelbayev.\n'''\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\n\nfrom layers.layers import MaskedConv\n\n__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']\n\n\ndef _weights_init(m):\n classname = m.__class__.__name__\n # print(classname)\n if isinstance(m, nn.Linear) or isinstance(m, MaskedConv):\n init.xavier_normal_(m.weight)\n\n\n_AFFINE = True\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = MaskedConv(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, affine=_AFFINE)\n self.conv2 = MaskedConv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, affine=_AFFINE)\n\n self.downsample = None\n self.bn3 = None\n if stride != 1 or in_planes != planes:\n self.downsample = nn.Sequential(\n MaskedConv(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))\n self.bn3 = nn.BatchNorm2d(self.expansion * planes, affine=_AFFINE)\n\n def forward(self, x):\n # x: batch_size * in_c * h * w\n residual = x\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n if self.downsample is not None:\n residual = self.bn3(self.downsample(x))\n out += residual\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n _outputs = [32, 64, 128]\n self.in_planes = _outputs[0]\n\n self.conv1 = MaskedConv(3, _outputs[0], kernel_size=3, stride=1, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(_outputs[0], affine=_AFFINE)\n self.layer1 = self._make_layer(block, _outputs[0], num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, _outputs[1], num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, _outputs[2], num_blocks[2], stride=2)\n self.linear = nn.Linear(_outputs[2], num_classes)\n\n self.apply(_weights_init)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = 
F.relu(self.bn(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, out.size()[3])\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef resnet20(num_classes):\n return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes)\n\n\ndef resnet32(num_classes):\n return ResNet(BasicBlock, [5, 5, 5], num_classes=num_classes)\n\n\ndef resnet44(num_classes):\n return ResNet(BasicBlock, [7, 7, 7], num_classes=num_classes)\n\n\ndef resnet56(num_classes):\n return ResNet(BasicBlock, [9, 9, 9], num_classes=num_classes)\n\n\ndef resnet110(num_classes):\n return ResNet(BasicBlock, [18, 18, 18], num_classes=num_classes)\n\n\ndef resnet1202(num_classes):\n return ResNet(BasicBlock, [200, 200, 200], num_classes=num_classes)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.init.xavier_normal_", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sweetpand/tensorflow_mri
[ "7a483cbbbe515ad395928311759505707bd72503", "7a483cbbbe515ad395928311759505707bd72503" ]
[ "recommendation_system_demos/Basic-CMN-Demo/util/gmf.py", "recommendation_system_demos/Basic-DIEN-Demo/source_code/utils.py" ]
[ "import sonnet as snt\nimport tensorflow as tf\n\nfrom util.helper import GraphKeys, add_to_collection\nfrom util.layers import DenseLayer, LossLayer, OptimizerLayer, ModelBase\n\n\nclass PairwiseGMF(ModelBase):\n\n def __init__(self, config):\n \"\"\"\n :param config:\n \"\"\"\n # super(PairwiseGMF, self).__init__(config)\n self.config = config\n self._activation_fn = tf.nn.relu\n self._embedding_initializers = {\n 'embeddings': tf.truncated_normal_initializer(stddev=0.01),\n }\n\n self._embedding_regularizers = {}\n\n self._initializers = {\n \"w\": tf.contrib.layers.xavier_initializer(),\n }\n\n self._regularizers = {\n 'w': tf.contrib.layers.l2_regularizer(config.l2)\n }\n\n self._construct_placeholders()\n self._construct_weights()\n self._construct()\n tf.summary.scalar('Model/Loss', tf.get_collection(GraphKeys.LOSSES)[0])\n self.summary = tf.summary.merge_all()\n\n def _construct(self):\n \"\"\"\n Construct the model; main part of it goes here\n \"\"\"\n\n self.v = DenseLayer(1, False, tf.nn.relu, initializers=self._initializers,\n regularizers=self._regularizers, name='OutputVector')\n self.score = tf.squeeze(self.v(self._cur_user * self._cur_item))\n negative_output = tf.squeeze(self.v(self._cur_user * self._cur_item_negative))\n tf.add_to_collection(GraphKeys.PREDICTION, self.score)\n self.loss = LossLayer()(self.score, negative_output)\n self._optimizer = OptimizerLayer(self.config.optimizer, clip=5.0,\n params={})\n self.train = self._optimizer(self.loss)\n\n def _construct_weights(self):\n \"\"\"\n Constructs the user/item memories and user/item external memory/outputs\n Also add the embedding lookups\n \"\"\"\n self.user_memory = snt.Embed(self.config.user_count, self.config.embed_size,\n initializers=self._embedding_initializers,\n regularizers=self._embedding_regularizers,\n name='MemoryEmbed')\n\n self.item_memory = snt.Embed(self.config.item_count,\n self.config.embed_size,\n initializers=self._embedding_initializers,\n regularizers=self._embedding_regularizers,\n name=\"ItemMemory\")\n\n # [batch, embedding size]\n self._cur_user = self.user_memory(self.input_users)\n\n # Item memories a query\n self._cur_item = self.item_memory(self.input_items)\n self._cur_item_negative = self.item_memory(self.input_items_negative)\n\n def _construct_placeholders(self):\n self.input_users = tf.placeholder(tf.int32, [None], 'UserID')\n self.input_items = tf.placeholder(tf.int32, [None], 'ItemID')\n self.input_items_negative = tf.placeholder(tf.int32, [None], 'NegativeItemID')\n\n # Add our placeholders\n add_to_collection(GraphKeys.PLACEHOLDER, [self.input_users,\n self.input_items,\n self.input_items_negative])\n", "import tensorflow as tf\nfrom tensorflow.python.ops.rnn_cell import *\nfrom tensorflow.python.ops.rnn_cell_impl import _Linear\n\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variable_scope as vs\n\n\nclass QAAttGRUCell(RNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight and\n projection matrices.\n bias_initializer: (optional) The initializer to use for the bias.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None):\n super(QAAttGRUCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n self._gate_linear = None\n self._candidate_linear = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n def __call__(self, inputs, state, att_score):\n return self.call(inputs, state, att_score)\n\n def call(self, inputs, state, att_score=None):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n if self._gate_linear is None:\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n self._gate_linear = _Linear(\n [inputs, state],\n 2 * self._num_units,\n True,\n bias_initializer=bias_ones,\n kernel_initializer=self._kernel_initializer)\n\n value = math_ops.sigmoid(self._gate_linear([inputs, state]))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n if self._candidate_linear is None:\n with vs.variable_scope(\"candidate\"):\n self._candidate_linear = _Linear(\n [inputs, r_state],\n self._num_units,\n True,\n bias_initializer=self._bias_initializer,\n kernel_initializer=self._kernel_initializer)\n c = self._activation(self._candidate_linear([inputs, r_state]))\n new_h = (1. - att_score) * state + att_score * c\n return new_h, new_h\n\nclass VecAttGRUCell(RNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. 
If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight and\n projection matrices.\n bias_initializer: (optional) The initializer to use for the bias.\n \"\"\"\n\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None):\n super(VecAttGRUCell, self).__init__(_reuse=reuse)\n self._num_units = num_units\n self._activation = activation or math_ops.tanh\n self._kernel_initializer = kernel_initializer\n self._bias_initializer = bias_initializer\n self._gate_linear = None\n self._candidate_linear = None\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n def __call__(self, inputs, state, att_score):\n return self.call(inputs, state, att_score)\n def call(self, inputs, state, att_score=None):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n if self._gate_linear is None:\n bias_ones = self._bias_initializer\n if self._bias_initializer is None:\n bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)\n with vs.variable_scope(\"gates\"): # Reset gate and update gate.\n self._gate_linear = _Linear(\n [inputs, state],\n 2 * self._num_units,\n True,\n bias_initializer=bias_ones,\n kernel_initializer=self._kernel_initializer)\n\n value = math_ops.sigmoid(self._gate_linear([inputs, state]))\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n if self._candidate_linear is None:\n with vs.variable_scope(\"candidate\"):\n self._candidate_linear = _Linear(\n [inputs, r_state],\n self._num_units,\n True,\n bias_initializer=self._bias_initializer,\n kernel_initializer=self._kernel_initializer)\n c = self._activation(self._candidate_linear([inputs, r_state]))\n u = (1.0 - att_score) * u\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\ndef prelu(_x, scope=''):\n \"\"\"parametric ReLU activation\"\"\"\n with tf.variable_scope(name_or_scope=scope, default_name=\"prelu\"):\n _alpha = tf.get_variable(\"prelu_\"+scope, shape=_x.get_shape()[-1],\n dtype=_x.dtype, initializer=tf.constant_initializer(0.1))\n return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)\n\ndef calc_auc(raw_arr):\n \"\"\"Summary\n Args:\n raw_arr (TYPE): Description\n Returns:\n TYPE: Description\n \"\"\"\n\n arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)\n pos, neg = 0., 0.\n for record in arr:\n if record[1] == 1.:\n pos += 1\n else:\n neg += 1\n\n fp, tp = 0., 0.\n xy_arr = []\n for record in arr:\n if record[1] == 1.:\n tp += 1\n else:\n fp += 1\n xy_arr.append([fp/neg, tp/pos])\n\n auc = 0.\n prev_x = 0.\n prev_y = 0.\n for x, y in xy_arr:\n if x != prev_x:\n auc += ((x - prev_x) * (y + prev_y) / 2.)\n prev_x = x\n prev_y = y\n\n return auc\n\ndef attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n\n mask = tf.equal(mask, tf.ones_like(mask))\n hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n input_size = query.get_shape().as_list()[-1]\n\n # Trainable parameters\n w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))\n w2 = 
tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))\n b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))\n\n with tf.name_scope('v'):\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\n # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\n tmp1 = tf.tensordot(facts, w1, axes=1)\n tmp2 = tf.tensordot(query, w2, axes=1)\n tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])\n tmp = tf.tanh((tmp1 + tmp2) + b)\n\n # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector\n v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape\n key_masks = mask # [B, 1, T]\n # key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)\n v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]\n alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape\n\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\n #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)\n output = facts * tf.expand_dims(alphas, -1)\n output = tf.reshape(output, tf.shape(facts))\n # output = output / (facts.get_shape().as_list()[-1] ** 0.5)\n if not return_alphas:\n return output\n else:\n return output, alphas\n\ndef din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n print (\"querry_size mismatch\")\n query = tf.concat(values = [\n query,\n query,\n ], axis=1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n mask = tf.equal(mask, tf.ones_like(mask))\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n # Mask\n # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Scale\n # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)\n\n # Activation\n if softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n return output\n\ndef din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward 
and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n mask = tf.equal(mask, tf.ones_like(mask))\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)\n d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)\n d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])\n scores = d_layer_3_all\n # Mask\n # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]\n key_masks = tf.expand_dims(mask, 1) # [B, 1, T]\n paddings = tf.ones_like(scores) * (-2 ** 32 + 1)\n if not forCnn:\n scores = tf.where(key_masks, scores, paddings) # [B, 1, T]\n\n # Scale\n # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)\n\n # Activation\n if softmax_stag:\n scores = tf.nn.softmax(scores) # [B, 1, T]\n\n # Weighted sum\n if mode == 'SUM':\n output = tf.matmul(scores, facts) # [B, 1, H]\n # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])\n else:\n scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])\n output = facts * tf.expand_dims(scores, -1)\n output = tf.reshape(output, tf.shape(facts))\n if return_alphas:\n return output, scores\n return output\n\ndef self_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],\n ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = [1, 0, 2])\n return self_attention\n\ndef self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):\n if len(facts.get_shape().as_list()) == 2:\n facts = tf.expand_dims(facts, 1)\n\n def cond(batch, output, i):\n return tf.less(i, tf.shape(batch)[1])\n\n def body(batch, output, i):\n self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,\n ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,\n mode='LIST')\n self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)\n output = output.write(i, self_attention_tmp)\n return batch, output, i + 1\n\n output_ta = tf.TensorArray(dtype=tf.float32,\n size=0,\n dynamic_size=True,\n element_shape=(facts[:, 0, :].get_shape()))\n _, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])\n self_attention = output_op.stack()\n self_attention = tf.transpose(self_attention, perm = 
[1, 0, 2])\n return self_attention\n\ndef din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):\n if isinstance(facts, tuple):\n # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.\n facts = tf.concat(facts, 2)\n\n if time_major:\n # (T,B,D) => (B,T,D)\n facts = tf.array_ops.transpose(facts, [1, 0, 2])\n # Trainable parameters\n mask = tf.equal(mask, tf.ones_like(mask))\n facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer\n querry_size = query.get_shape().as_list()[-1]\n query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)\n query = prelu(query)\n queries = tf.tile(query, [1, tf.shape(facts)[1]])\n queries = tf.reshape(queries, tf.shape(facts))\n din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)\n d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)\n d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)\n d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))\n output = d_layer_2_all\n return output\n" ]
[ [ "tensorflow.get_collection", "tensorflow.placeholder", "tensorflow.truncated_normal_initializer", "tensorflow.summary.merge_all", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.add_to_collection" ], [ "tensorflow.concat", "tensorflow.python.ops.array_ops.split", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.tanh", "tensorflow.where", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.rnn_cell_impl._Linear", "tensorflow.while_loop", "tensorflow.layers.dense", "tensorflow.name_scope", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.tensordot", "tensorflow.matmul", "tensorflow.shape", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.maximum", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.array_ops.transpose", "tensorflow.random_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
chaoannricardo/NTU_CARDO_Database
[ "5fbfa1383f2e65a04fabd863c68373f45bbf05fd" ]
[ "DBUIScripts/db_update.py" ]
[ "# -*- coding: utf8 -*-\nimport pandas as pd\nimport pymysql\n\n# import configuration in parent dir\nimport os, sys, inspect\n\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nimport configuration as conf\n# import packages in models\nfrom models import data_processing, database_management, file_management\n\n\nif __name__ == '__main__':\n\n # change working directory to project location\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(os.path.dirname(abspath))\n os.chdir(dname)\n\n # script parameters\n name_to_update = sys.argv[1]\n update_type = sys.argv[2]\n update_method = sys.argv[3]\n update_point = sys.argv[4]\n\n # start updating database content\n\n # log in database\n config = conf.auto_log_in(\"cardo_main\")\n conn = pymysql.connect(**config)\n conn_cursor = conn.cursor()\n\n # set up parameters for update\n column_to_update = \"\"\n if update_type == \"0\":\n column_to_update = \"是否計算黑名單\"\n elif update_type == \"1\":\n column_to_update = \"CARDO點數\"\n\n command_text = \"\"\n # update table with different method\n if update_method == \"0\":\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = 0 WHERE 姓名 = \" + name_to_update + \";\"\n elif update_method == \"1\":\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = \" + update_point + \\\n \" WHERE `姓名` = \\'\" + name_to_update + \"\\';\"\n # command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = 0 WHERE 姓名 = \" + name_to_update + \";\"\n elif update_method == \"2\":\n select_command = \"SELECT \" + column_to_update + \" FROM cardo_main.點數記錄表_目前 WHERE 姓名 = \\'\" + name_to_update + \"\\';\"\n data_return = pd.read_sql(select_command, conn)\n update_point = str(int(data_return.iloc[0, 0]) - int(update_point))\n command_text = \"UPDATE cardo_main.點數記錄表_目前 SET \" + column_to_update + \" = \" + update_point + \" WHERE 姓名 = \\'\" + name_to_update + \"\\';\"\n\n # execute command\n conn_cursor.execute(\"SET SQL_SAFE_UPDATES = 0;\")\n conn.commit()\n conn_cursor.execute(command_text)\n conn.commit()\n" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hellwue/TreeSpeciesClassification
[ "8fd8dc6496d8317923c6112d3da46844d419e49f" ]
[ "PointCNN/pointcnn_geom+i+ms.py" ]
[ "import pickle\nfrom myutils import load_dataset, call_home, CMDisplay\nfrom itertools import chain\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear as Lin\nfrom torch.optim import Adam\n\nfrom torch_geometric.nn import XConv, fps, global_mean_pool\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nfrom sklearn.metrics import classification_report as ClRp\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\n\n\nclass PointCNN(pl.LightningModule):\n def __init__(self, numfeatures=1):\n super().__init__()\n\n self.learning_rate = 1e-3\n self.train_acc = pl.metrics.Accuracy()\n self.val_acc = pl.metrics.Accuracy()\n self.test_acc = pl.metrics.Accuracy()\n self.numfeatures = numfeatures\n # First XConv layer.\n # Lifting the point coordinates with no features (0) into feature space\n self.conv1 = XConv(self.numfeatures, 48, dim=3,\n kernel_size=8, hidden_channels=32)\n # Further XConv layers to further enrich the features\n self.conv2 = XConv(48, 96, dim=3, kernel_size=12,\n hidden_channels=64, dilation=2)\n self.conv3 = XConv(96, 192, dim=3, kernel_size=16,\n hidden_channels=128, dilation=2)\n self.conv4 = XConv(192, 384, dim=3, kernel_size=16,\n hidden_channels=256, dilation=2)\n\n # MLPs at the end of the PointCNN\n self.lin1 = Lin(389, 256)\n self.lin2 = Lin(256, 128)\n self.lin3 = Lin(128, 4)\n\n def forward(self, data):\n pos, batch = data.pos, data.batch\n x = data.x if self.numfeatures else None\n ms_feat = data.feat\n # First XConv with no features\n x = F.relu(self.conv1(x, pos, batch))\n\n # Farthest point sampling, keeping only 37.5%\n idx = fps(pos, batch, ratio=0.375)\n x, pos, batch = x[idx], pos[idx], batch[idx]\n\n # Second XConv\n x = F.relu(self.conv2(x, pos, batch))\n\n # Farthest point sampling, keeping only 33.4%\n idx = fps(pos, batch, ratio=0.334)\n x, pos, batch = x[idx], pos[idx], batch[idx]\n\n # Two more XConvs\n x = F.relu(self.conv3(x, pos, batch))\n x = F.relu(self.conv4(x, pos, batch))\n\n # Pool the batch-elements together\n # Each tree is described in one single point with 384 features\n x = global_mean_pool(x, batch)\n\n x = torch.cat((x, ms_feat), dim=1)\n # MLPs at the end with ReLU\n x = F.relu(self.lin1(x))\n x = F.relu(self.lin2(x))\n\n # Dropout (?!): Set randomly values to zero\n x = F.dropout(x, p=0.5, training=self.training)\n # Last MLP predicting labels\n x = self.lin3(x)\n\n # log-SoftMax Activation function to then calculate NLL-Loss (Negative Log Likelihood)\n return F.log_softmax(x, dim=-1)\n\n def training_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n loss = F.nll_loss(out, y)\n self.train_acc(out, y)\n self.log('train_acc', self.train_acc, on_step=True, on_epoch=True)\n self.log('train_loss', loss) # , on_step=True, on_epoch=True)\n return loss\n\n def validation_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n val_loss = F.nll_loss(out, y)\n self.val_acc(out, y)\n self.log('val_acc', self.val_acc, on_step=True, on_epoch=True)\n self.log('val_loss', val_loss) # , on_step=True, on_epoch=True)\n return val_loss\n\n def test_step(self, data, batch_idx):\n y = data.y\n out = self(data)\n test_loss = F.nll_loss(out, y)\n self.test_acc(out, y)\n self.log('test_loss', test_loss)\n return out\n\n def test_step_end(self, outs):\n return outs\n\n def test_epoch_end(self, outs):\n global res\n res = outs\n return outs\n\n def configure_optimizers(self):\n optimizer 
= Adam(self.parameters(), lr=self.learning_rate)\n return optimizer\n\n\nMODEL_NAME = 'geom+i+ms'\n\n\ntrain_dataset = load_dataset(\n '../../0_data/hdf/train.h5', batch_size=16, shuffle=True, load_ms=True)\nval_dataset = load_dataset(\n '../../0_data/hdf/val.h5', batch_size=16, load_ms=True)\ntest_dataset = load_dataset(\n '../../0_data/hdf/test.h5', batch_size=16, load_ms=True)\n\n\ncheckpoint_callback = ModelCheckpoint(monitor='val_loss', save_top_k=1)\ntrainer = pl.Trainer(gpus=1,\n progress_bar_refresh_rate=1,\n callbacks=[EarlyStopping(\n monitor='val_loss', patience=20)],\n checkpoint_callback=checkpoint_callback)\n\n# pl.seed_everything(420)\nmodel = PointCNN()\ntrainer.fit(model, train_dataset, val_dataset)\nbest_model = checkpoint_callback.best_model_path\nprint(best_model)\ncall_home(f'Done learning {MODEL_NAME}: ' + best_model)\n\nres = []\n\nmodel = PointCNN.load_from_checkpoint(checkpoint_path=best_model)\n# pl.seed_everything(420)\ntrainer.test(model, test_dataloaders=test_dataset)\n\nwith open(f'./results/{MODEL_NAME}_results.pickle', 'wb') as file:\n pickle.dump(res, file)\n\nlogits = list(chain(*(r.exp().argmax(axis=1).tolist() for r in res)))\nground = list(chain(*(tmp.y.tolist() for tmp in test_dataset)))\n\nclassification_report = ClRp(ground,\n logits,\n target_names=['coniferous',\n 'decidious',\n 'snag',\n 'dead tree'],\n digits=3)\nprint(classification_report)\nwith open(f'./results/{MODEL_NAME}_results.txt', 'w') as file:\n file.writelines(classification_report)\n file.writelines(best_model)\n\nCMDisplay(metrics.confusion_matrix(ground, logits)).plot()\nplt.savefig(f'./results/{MODEL_NAME}_results.eps', bbox_inches='tight')\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.cat", "torch.nn.functional.nll_loss", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.savefig", "torch.nn.Linear", "sklearn.metrics.classification_report" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]